From 3ac9dd8e404fcbf94a1f6bffd05b61d7f2a7ec67 Mon Sep 17 00:00:00 2001
From: Stefan Büringer
Date: Fri, 23 Aug 2019 07:25:20 +0200
Subject: [PATCH] [v1alpha2 Migration] add v1alpha2 controllers (#446)

* migrate types & controllers to v1alpha2
* fix kubelet version
---
 .gitignore | 27 +-
 Dockerfile | 71 +
 Makefile | 63 +-
 PROJECT | 11 +-
 README.md | 2 +-
 api/v1alpha2/groupversion_info.go | 36 +
 api/v1alpha2/openstackcluster_types.go | 154 +
 api/v1alpha2/openstackmachine_types.go | 155 +
 api/v1alpha2/types.go | 222 ++
 .../v1alpha2}/zz_generated.deepcopy.go | 222 +-
 cmd/clusterctl/.gitignore | 2 -
 cmd/clusterctl/Dockerfile | 32 -
 .../examples/openstack/cluster.yaml | 21 -
 .../machine-deployment.yaml.template | 40 -
 .../examples/openstack/machines.yaml.template | 70 -
 .../cluster-api/kustomization.yaml | 17 -
 .../cluster-api/manager_image_patch.yaml | 11 -
 cmd/manager/Dockerfile | 33 -
 cmd/manager/main.go | 132 -
 config/certmanager/certificate.yaml | 24 +
 config/certmanager/kustomization.yaml | 5 +
 config/certmanager/kustomizeconfig.yaml | 16 +
 ...re.cluster.x-k8s.io_openstackclusters.yaml | 732 ++++
 ...re.cluster.x-k8s.io_openstackmachines.yaml | 676 ++++
 config/crd/kustomization.yaml | 24 +
 config/crd/kustomizeconfig.yaml | 17 +
 .../cainjection_in_openstackclusters.yaml | 8 +
 .../cainjection_in_openstackmachines.yaml | 8 +
 .../patches/webhook_in_openstackclusters.yaml | 17 +
 .../patches/webhook_in_openstackmachines.yaml | 17 +
 ...v1alpha1_openstackclusterproviderspec.yaml | 513 ---
 ...alpha1_openstackclusterproviderstatus.yaml | 187 -
 ...alpha1_openstackmachineproviderstatus.yaml | 35 -
 ...config_v1alpha1_openstackproviderspec.yaml | 529 ---
 config/default/kustomization.yaml | 73 +
 config/default/manager_auth_proxy_patch.yaml | 25 +
 config/default/manager_image_patch.yaml | 12 +
 .../manager_prometheus_metrics_patch.yaml | 19 +
 config/default/manager_webhook_patch.yaml | 23 +
 config/default/webhookcainjection_patch.yaml | 15 +
 config/kustomization.yaml | 17 -
 config/manager/kustomization.yaml | 2 +
 config/manager/manager.yaml | 42 +
 config/manager/service.yaml | 14 -
 config/manager/statefulset.yaml | 67 -
 config/rbac/auth_proxy_role.yaml | 13 +
 ...ding.yaml => auth_proxy_role_binding.yaml} | 7 +-
 config/rbac/auth_proxy_service.yaml | 18 +
 config/rbac/kustomization.yaml | 11 +
 config/rbac/leader_election_role.yaml | 32 +
 ...yaml => leader_election_role_binding.yaml} | 7 +-
 .../rbac/role.yaml | 87 +-
 config/rbac/role_binding.yaml | 12 +
 config/rbac/secrets_role.yaml | 15 -
 ...rastructure_v1alpha2_openstackcluster.yaml | 7 +
 ...rastructure_v1alpha2_openstackmachine.yaml | 7 +
 config/webhook/kustomization.yaml | 6 +
 config/webhook/kustomizeconfig.yaml | 25 +
 config/webhook/manifests.yaml | 0
 config/webhook/service.yaml | 12 +
 controllers/openstackcluster_controller.go | 276 ++
 controllers/openstackmachine_controller.go | 422 ++
 controllers/suite_test.go | 82 +
 go.mod | 34 +-
 go.sum | 629 +--
 .../boilerplate.generatego.txt} | 0
 hack/tools.go | 20 +-
 hack/update-vendor.sh | 14 +-
 main.go | 122 +
 overlays-config/coreos/kustomization.yaml | 4 -
 .../coreos/manager/deployment.yaml | 19 -
 overlays-config/generic/kustomization.yaml | 4 -
 .../generic/manager/deployment.yaml | 19 -
 pkg/apis/apis.go | 39 -
 .../v1alpha1/register.go | 158 -
 .../v1alpha1/securitygroup_types.go | 51 -
 .../openstackproviderconfig/v1alpha1/types.go | 371 --
 pkg/cloud/openstack/cluster/actuator.go | 267 --
 pkg/cloud/openstack/contants/constants.go | 6 -
 pkg/cloud/openstack/machine/actuator.go | 512 ---
pkg/cloud/openstack/machine/instancestatus.go | 114 - .../services/certificates/BUILD.bazel | 27 - .../services/certificates/certificates.go | 42 +- .../certificates/certificates_test.go | 22 +- .../openstack/services/compute/instance.go | 90 +- .../openstack/services/kubeadm/configs.go | 4 +- .../services/kubeadm/configs_test.go | 4 +- .../services/loadbalancer/loadbalancer.go | 79 +- .../services/networking/floatingip.go | 26 +- .../openstack/services/networking/network.go | 34 +- .../openstack/services/networking/router.go | 30 +- .../services/networking/securitygroups.go | 73 +- .../openstack/services/provider/provider.go | 49 +- .../openstack/services/userdata/kubeadm.go | 34 +- .../services/userdata/machinescript.go | 171 +- pkg/deployer/deployer.go | 70 - .../examples/openstack => samples}/.gitignore | 0 .../examples/openstack => samples}/README.md | 5 +- .../openstack => samples}/generate-yaml.sh | 28 +- samples/templates/clouds-secrets/.gitignore | 1 + .../clouds-secrets/kustomization.yaml | 2 +- samples/templates/cluster.yaml | 40 + samples/templates/machines.yaml | 104 + .../templates/namespace/kustomization.yaml | 2 + .../templates/namespace}/namespace.yaml | 2 +- .../user-data/centos/kustomization.yaml | 0 .../centos/templates/master-user-data.sh | 2 +- .../centos/templates/worker-user-data.sh | 2 +- .../user-data/coreos/kustomization.yaml | 0 .../user-data/coreos/master-user-data.yaml | 353 ++ .../user-data/coreos/templates/common.yaml | 6 +- .../user-data/coreos/templates/master.yaml | 0 .../user-data/coreos/templates/worker.yaml | 0 .../user-data/coreos/worker-user-data.yaml | 264 ++ .../user-data/ubuntu/kustomization.yaml | 0 .../user-data/ubuntu/master-user-data.sh | 147 + .../ubuntu/templates/master-user-data.sh | 2 +- .../ubuntu/templates/worker-user-data.sh | 2 +- .../user-data/ubuntu/worker-user-data.sh | 95 + third_party/forked/README.md | 6 + .../forked/rerun-process-wrapper}/LICENSE | 0 .../forked/rerun-process-wrapper/README.md | 29 + .../forked/rerun-process-wrapper/restart.sh | 22 + .../forked/rerun-process-wrapper/start.sh | 41 + vendor/cloud.google.com/go/AUTHORS | 15 - vendor/cloud.google.com/go/CONTRIBUTORS | 40 - vendor/cloud.google.com/go/LICENSE | 202 - .../go/compute/metadata/metadata.go | 513 --- .../exporter/ocagent/.travis.yml | 17 - .../exporter/ocagent/CONTRIBUTING.md | 24 - .../exporter/ocagent/README.md | 61 - .../exporter/ocagent/common.go | 38 - .../exporter/ocagent/go.mod | 9 - .../exporter/ocagent/go.sum | 44 - .../exporter/ocagent/nodeinfo.go | 41 - .../exporter/ocagent/ocagent.go | 299 -- .../exporter/ocagent/options.go | 80 - .../exporter/ocagent/transform_spans.go | 162 - .../exporter/ocagent/version.go | 17 - vendor/github.com/Azure/go-autorest/LICENSE | 191 - .../Azure/go-autorest/autorest/adal/README.md | 292 -- .../Azure/go-autorest/autorest/adal/config.go | 91 - .../go-autorest/autorest/adal/devicetoken.go | 242 -- .../go-autorest/autorest/adal/persist.go | 73 - .../Azure/go-autorest/autorest/adal/sender.go | 60 - .../Azure/go-autorest/autorest/adal/token.go | 985 ----- .../go-autorest/autorest/adal/version.go | 45 - .../go-autorest/autorest/authorization.go | 260 -- .../Azure/go-autorest/autorest/autorest.go | 150 - .../Azure/go-autorest/autorest/azure/async.go | 992 ----- .../Azure/go-autorest/autorest/azure/azure.go | 326 -- .../autorest/azure/environments.go | 191 - .../autorest/azure/metadata_environment.go | 245 -- .../Azure/go-autorest/autorest/azure/rp.go | 200 - .../Azure/go-autorest/autorest/client.go | 270 -- 
.../Azure/go-autorest/autorest/date/date.go | 96 - .../Azure/go-autorest/autorest/date/time.go | 103 - .../go-autorest/autorest/date/timerfc1123.go | 100 - .../go-autorest/autorest/date/unixtime.go | 123 - .../go-autorest/autorest/date/utility.go | 25 - .../Azure/go-autorest/autorest/error.go | 98 - .../Azure/go-autorest/autorest/preparer.go | 480 --- .../Azure/go-autorest/autorest/responder.go | 250 -- .../go-autorest/autorest/retriablerequest.go | 52 - .../autorest/retriablerequest_1.7.go | 54 - .../autorest/retriablerequest_1.8.go | 66 - .../Azure/go-autorest/autorest/sender.go | 326 -- .../Azure/go-autorest/autorest/utility.go | 228 -- .../Azure/go-autorest/autorest/version.go | 41 - .../Azure/go-autorest/logger/logger.go | 328 -- .../Azure/go-autorest/tracing/tracing.go | 190 - .../github.com/appscode/jsonpatch/.gitignore | 26 - .../github.com/appscode/jsonpatch/.travis.yml | 10 - .../github.com/appscode/jsonpatch/README.md | 53 - vendor/github.com/appscode/jsonpatch/go.mod | 8 - vendor/github.com/appscode/jsonpatch/go.sum | 8 - .../opencensus-proto/AUTHORS | 1 - .../opencensus-proto/LICENSE | 202 - .../gen-go/agent/common/v1/common.pb.go | 356 -- .../gen-go/agent/trace/v1/trace_service.pb.go | 443 --- .../gen-go/resource/v1/resource.pb.go | 99 - .../gen-go/trace/v1/trace.pb.go | 1654 -------- .../gen-go/trace/v1/trace_config.pb.go | 406 -- vendor/github.com/coreos/go-semver/NOTICE | 5 + .../coreos/go-semver/semver/semver.go | 28 + .../coreos/go-systemd/unit/deserialize.go | 2 +- vendor/github.com/dgrijalva/jwt-go/.gitignore | 4 - .../github.com/dgrijalva/jwt-go/.travis.yml | 13 - .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 - vendor/github.com/dgrijalva/jwt-go/README.md | 100 - .../dgrijalva/jwt-go/VERSION_HISTORY.md | 118 - vendor/github.com/dgrijalva/jwt-go/claims.go | 134 - vendor/github.com/dgrijalva/jwt-go/doc.go | 4 - vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 148 - .../dgrijalva/jwt-go/ecdsa_utils.go | 67 - vendor/github.com/dgrijalva/jwt-go/errors.go | 59 - vendor/github.com/dgrijalva/jwt-go/hmac.go | 95 - .../github.com/dgrijalva/jwt-go/map_claims.go | 94 - vendor/github.com/dgrijalva/jwt-go/none.go | 52 - vendor/github.com/dgrijalva/jwt-go/parser.go | 148 - vendor/github.com/dgrijalva/jwt-go/rsa.go | 101 - vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 - .../github.com/dgrijalva/jwt-go/rsa_utils.go | 101 - .../dgrijalva/jwt-go/signing_method.go | 35 - vendor/github.com/dgrijalva/jwt-go/token.go | 108 - .../github.com/evanphx/json-patch/.travis.yml | 16 + vendor/github.com/evanphx/json-patch/LICENSE | 25 + .../github.com/evanphx/json-patch/README.md | 297 ++ .../github.com/evanphx/json-patch/errors.go | 38 + vendor/github.com/evanphx/json-patch/merge.go | 383 ++ vendor/github.com/evanphx/json-patch/patch.go | 776 ++++ vendor/github.com/fatih/color/.travis.yml | 5 + vendor/github.com/fatih/color/Gopkg.lock | 27 + vendor/github.com/fatih/color/Gopkg.toml | 30 + vendor/github.com/fatih/color/LICENSE.md | 20 + vendor/github.com/fatih/color/README.md | 179 + vendor/github.com/fatih/color/color.go | 603 +++ vendor/github.com/fatih/color/doc.go | 133 + vendor/github.com/ghodss/yaml/.gitignore | 20 - vendor/github.com/ghodss/yaml/.travis.yml | 7 - vendor/github.com/ghodss/yaml/LICENSE | 50 - vendor/github.com/ghodss/yaml/README.md | 121 - vendor/github.com/ghodss/yaml/fields.go | 501 --- vendor/github.com/ghodss/yaml/yaml.go | 277 -- vendor/github.com/gobuffalo/envy/.env | 5 - vendor/github.com/gobuffalo/envy/.gitignore | 29 - vendor/github.com/gobuffalo/envy/.travis.yml | 
36 - vendor/github.com/gobuffalo/envy/Makefile | 46 - vendor/github.com/gobuffalo/envy/README.md | 93 - vendor/github.com/gobuffalo/envy/envy.go | 258 -- vendor/github.com/gobuffalo/envy/go.mod | 9 - vendor/github.com/gobuffalo/envy/go.sum | 383 -- vendor/github.com/gobuffalo/envy/shoulders.md | 16 - vendor/github.com/gobuffalo/envy/version.go | 3 - .../diskv => gobuffalo/flect}/LICENSE | 12 +- vendor/github.com/gobuffalo/flect/Makefile | 61 +- vendor/github.com/gobuffalo/flect/README.md | 2 +- .../github.com/gobuffalo/flect/SHOULDERS.md | 10 + .../gobuffalo/flect/azure-pipelines.yml | 71 + .../gobuffalo/flect/azure-tests.yml | 19 + .../github.com/gobuffalo/flect/custom_data.go | 7 +- vendor/github.com/gobuffalo/flect/go.mod | 6 +- vendor/github.com/gobuffalo/flect/go.sum | 324 +- vendor/github.com/gobuffalo/flect/humanize.go | 31 + vendor/github.com/gobuffalo/flect/ident.go | 3 - vendor/github.com/gobuffalo/flect/titleize.go | 3 +- vendor/github.com/gobuffalo/flect/version.go | 2 +- .../github.com/gogo/protobuf/proto/decode.go | 1 - .../gogo/protobuf/proto/deprecated.go | 63 + .../gogo/protobuf/proto/extensions.go | 2 +- vendor/github.com/gogo/protobuf/proto/lib.go | 20 - .../gogo/protobuf/proto/message_set.go | 137 +- .../gogo/protobuf/proto/properties.go | 9 - .../gogo/protobuf/proto/table_marshal.go | 2 +- .../gogo/protobuf/proto/table_unmarshal.go | 4 +- .../golang/protobuf/proto/decode.go | 1 - .../golang/protobuf/proto/deprecated.go | 63 + .../github.com/golang/protobuf/proto/equal.go | 3 +- .../golang/protobuf/proto/extensions.go | 78 +- .../github.com/golang/protobuf/proto/lib.go | 38 +- .../golang/protobuf/proto/message_set.go | 137 +- .../golang/protobuf/proto/pointer_reflect.go | 5 +- .../golang/protobuf/proto/pointer_unsafe.go | 15 +- .../golang/protobuf/proto/properties.go | 31 +- .../golang/protobuf/proto/table_marshal.go | 45 +- .../golang/protobuf/proto/table_unmarshal.go | 72 +- .../golang/protobuf/ptypes/any/any.pb.go | 45 +- .../golang/protobuf/ptypes/any/any.proto | 21 +- .../golang/protobuf/ptypes/duration.go | 2 +- .../protobuf/ptypes/duration/duration.pb.go | 26 +- .../golang/protobuf/ptypes/timestamp.go | 6 +- .../protobuf/ptypes/timestamp/timestamp.pb.go | 34 +- .../protobuf/ptypes/timestamp/timestamp.proto | 8 +- .../protobuf/ptypes/wrappers/wrappers.pb.go | 443 --- .../protobuf/ptypes/wrappers/wrappers.proto | 118 - vendor/github.com/google/btree/.travis.yml | 1 - vendor/github.com/google/btree/LICENSE | 202 - vendor/github.com/google/btree/README.md | 12 - vendor/github.com/google/btree/btree.go | 890 ----- vendor/github.com/google/btree/btree_mem.go | 76 - vendor/github.com/google/gofuzz/go.mod | 3 - vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CONTRIBUTING.md | 10 - vendor/github.com/google/uuid/CONTRIBUTORS | 9 - vendor/github.com/google/uuid/LICENSE | 27 - vendor/github.com/google/uuid/README.md | 19 - vendor/github.com/google/uuid/dce.go | 80 - vendor/github.com/google/uuid/doc.go | 12 - vendor/github.com/google/uuid/go.mod | 1 - vendor/github.com/google/uuid/hash.go | 53 - vendor/github.com/google/uuid/marshal.go | 37 - vendor/github.com/google/uuid/node.go | 90 - vendor/github.com/google/uuid/node_js.go | 12 - vendor/github.com/google/uuid/node_net.go | 33 - vendor/github.com/google/uuid/sql.go | 59 - vendor/github.com/google/uuid/time.go | 123 - vendor/github.com/google/uuid/util.go | 43 - vendor/github.com/google/uuid/uuid.go | 245 -- vendor/github.com/google/uuid/version1.go | 44 - 
vendor/github.com/google/uuid/version4.go | 38 - .../gregjones/httpcache/.travis.yml | 19 - .../gregjones/httpcache/LICENSE.txt | 7 - .../github.com/gregjones/httpcache/README.md | 25 - .../httpcache/diskcache/diskcache.go | 61 - .../gregjones/httpcache/httpcache.go | 551 --- vendor/github.com/hpcloud/tail/.gitignore | 3 + vendor/github.com/hpcloud/tail/.travis.yml | 18 + vendor/github.com/hpcloud/tail/CHANGES.md | 63 + vendor/github.com/hpcloud/tail/Dockerfile | 19 + vendor/github.com/hpcloud/tail/LICENSE.txt | 21 + vendor/github.com/hpcloud/tail/Makefile | 11 + vendor/github.com/hpcloud/tail/README.md | 28 + vendor/github.com/hpcloud/tail/appveyor.yml | 11 + .../tail/ratelimiter/Licence} | 3 +- .../hpcloud/tail/ratelimiter/leakybucket.go | 97 + .../hpcloud/tail/ratelimiter/memory.go | 58 + .../hpcloud/tail/ratelimiter/storage.go | 6 + vendor/github.com/hpcloud/tail/tail.go | 438 +++ vendor/github.com/hpcloud/tail/tail_posix.go | 11 + .../github.com/hpcloud/tail/tail_windows.go | 12 + vendor/github.com/hpcloud/tail/util/util.go | 48 + .../hpcloud/tail/watch/filechanges.go | 36 + .../github.com/hpcloud/tail/watch/inotify.go | 128 + .../hpcloud/tail/watch/inotify_tracker.go | 260 ++ .../github.com/hpcloud/tail/watch/polling.go | 118 + vendor/github.com/hpcloud/tail/watch/watch.go | 20 + .../hpcloud/tail/winfile/winfile.go | 92 + vendor/github.com/imdario/mergo/README.md | 18 +- vendor/github.com/imdario/mergo/map.go | 1 - vendor/github.com/imdario/mergo/merge.go | 19 +- vendor/github.com/joho/godotenv/.gitignore | 1 - vendor/github.com/joho/godotenv/.travis.yml | 8 - vendor/github.com/joho/godotenv/README.md | 163 - vendor/github.com/joho/godotenv/godotenv.go | 346 -- .../github.com/mattn/go-colorable/.travis.yml | 9 + vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 29 + .../mattn/go-colorable/colorable_others.go | 30 + .../mattn/go-colorable/colorable_windows.go | 980 +++++ vendor/github.com/mattn/go-colorable/go.mod | 3 + vendor/github.com/mattn/go-colorable/go.sum | 4 + .../mattn/go-colorable/noncolorable.go | 55 + vendor/github.com/mattn/go-isatty/.travis.yml | 13 + .../LICENSE.txt => mattn/go-isatty/LICENSE} | 5 +- vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.mod | 3 + vendor/github.com/mattn/go-isatty/go.sum | 2 + .../mattn/go-isatty/isatty_android.go | 23 + .../github.com/mattn/go-isatty/isatty_bsd.go | 24 + .../mattn/go-isatty/isatty_others.go | 15 + .../mattn/go-isatty/isatty_solaris.go | 22 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 94 + vendor/github.com/onsi/ginkgo/.gitignore | 7 + vendor/github.com/onsi/ginkgo/.travis.yml | 15 + vendor/github.com/onsi/ginkgo/CHANGELOG.md | 218 ++ vendor/github.com/onsi/ginkgo/CONTRIBUTING.md | 33 + .../godotenv/LICENCE => onsi/ginkgo/LICENSE} | 5 +- vendor/github.com/onsi/ginkgo/README.md | 121 + vendor/github.com/onsi/ginkgo/RELEASING.md | 14 + .../github.com/onsi/ginkgo/config/config.go | 200 + vendor/github.com/onsi/ginkgo/ginkgo_dsl.go | 619 +++ .../internal/codelocation/code_location.go | 32 + .../internal/containernode/container_node.go | 151 + .../onsi/ginkgo/internal/failer/failer.go | 92 + .../ginkgo/internal/leafnodes/benchmarker.go | 103 + .../ginkgo/internal/leafnodes/interfaces.go | 19 + .../onsi/ginkgo/internal/leafnodes/it_node.go | 47 + .../ginkgo/internal/leafnodes/measure_node.go | 62 + 
.../onsi/ginkgo/internal/leafnodes/runner.go | 117 + .../ginkgo/internal/leafnodes/setup_nodes.go | 48 + .../ginkgo/internal/leafnodes/suite_nodes.go | 55 + .../synchronized_after_suite_node.go | 90 + .../synchronized_before_suite_node.go | 181 + .../onsi/ginkgo/internal/remote/aggregator.go | 249 ++ .../internal/remote/forwarding_reporter.go | 147 + .../internal/remote/output_interceptor.go | 13 + .../remote/output_interceptor_unix.go | 83 + .../internal/remote/output_interceptor_win.go | 36 + .../onsi/ginkgo/internal/remote/server.go | 224 ++ .../remote/syscall_dup_linux_arm64.go | 11 + .../internal/remote/syscall_dup_solaris.go | 9 + .../internal/remote/syscall_dup_unix.go | 11 + .../onsi/ginkgo/internal/spec/spec.go | 247 ++ .../onsi/ginkgo/internal/spec/specs.go | 144 + .../internal/spec_iterator/index_computer.go | 55 + .../spec_iterator/parallel_spec_iterator.go | 59 + .../spec_iterator/serial_spec_iterator.go | 45 + .../sharded_parallel_spec_iterator.go | 47 + .../internal/spec_iterator/spec_iterator.go | 20 + .../ginkgo/internal/specrunner/random_id.go | 15 + .../ginkgo/internal/specrunner/spec_runner.go | 411 ++ .../onsi/ginkgo/internal/suite/suite.go | 190 + .../internal/testingtproxy/testing_t_proxy.go | 76 + .../ginkgo/internal/writer/fake_writer.go | 36 + .../onsi/ginkgo/internal/writer/writer.go | 89 + .../onsi/ginkgo/reporters/default_reporter.go | 84 + .../onsi/ginkgo/reporters/fake_reporter.go | 59 + .../onsi/ginkgo/reporters/junit_reporter.go | 152 + .../onsi/ginkgo/reporters/reporter.go | 15 + .../reporters/stenographer/console_logging.go | 64 + .../stenographer/fake_stenographer.go | 142 + .../reporters/stenographer/stenographer.go | 572 +++ .../stenographer/support/go-colorable/LICENSE | 21 + .../support/go-colorable/README.md | 43 + .../support/go-colorable/colorable_others.go | 24 + .../support/go-colorable/colorable_windows.go | 783 ++++ .../support/go-colorable/noncolorable.go | 57 + .../stenographer/support/go-isatty/LICENSE} | 5 +- .../stenographer/support/go-isatty/README.md | 37 + .../stenographer/support/go-isatty/doc.go | 2 + .../support/go-isatty/isatty_appengine.go | 9 + .../support/go-isatty/isatty_bsd.go | 18 + .../support/go-isatty/isatty_linux.go | 18 + .../support/go-isatty/isatty_solaris.go | 16 + .../support/go-isatty/isatty_windows.go | 19 + .../ginkgo/reporters/teamcity_reporter.go | 93 + .../onsi/ginkgo/types/code_location.go | 15 + .../onsi/ginkgo/types/synchronization.go | 30 + vendor/github.com/onsi/ginkgo/types/types.go | 174 + vendor/github.com/pborman/uuid/.travis.yml | 5 +- vendor/github.com/pborman/uuid/README.md | 2 - vendor/github.com/pborman/uuid/doc.go | 7 +- vendor/github.com/pborman/uuid/go.mod | 3 - vendor/github.com/pborman/uuid/go.sum | 2 - vendor/github.com/pborman/uuid/marshal.go | 10 +- vendor/github.com/pborman/uuid/node.go | 77 +- vendor/github.com/pborman/uuid/sql.go | 4 +- vendor/github.com/pborman/uuid/time.go | 87 +- vendor/github.com/pborman/uuid/util.go | 11 + vendor/github.com/pborman/uuid/uuid.go | 89 +- vendor/github.com/pborman/uuid/version1.go | 28 +- vendor/github.com/pborman/uuid/version4.go | 13 +- .../github.com/peterbourgon/diskv/README.md | 141 - .../peterbourgon/diskv/compression.go | 64 - vendor/github.com/peterbourgon/diskv/diskv.go | 624 --- vendor/github.com/peterbourgon/diskv/index.go | 115 - .../client_golang/prometheus/collector.go | 2 +- .../client_golang/prometheus/doc.go | 6 +- .../client_golang/prometheus/histogram.go | 4 +- .../client_golang/prometheus/http.go | 1 - 
.../prometheus/promhttp/delegator.go | 1 - .../client_golang/prometheus/promhttp/http.go | 1 - .../client_golang/prometheus/registry.go | 2 +- .../client_golang/prometheus/summary.go | 151 +- .../prometheus/common/expfmt/text_create.go | 12 +- .../github.com/rogpeppe/go-internal/LICENSE | 27 - .../rogpeppe/go-internal/modfile/gopkgin.go | 47 - .../rogpeppe/go-internal/modfile/print.go | 164 - .../rogpeppe/go-internal/modfile/read.go | 869 ----- .../rogpeppe/go-internal/modfile/rule.go | 724 ---- .../rogpeppe/go-internal/module/module.go | 540 --- .../rogpeppe/go-internal/semver/semver.go | 388 -- vendor/github.com/spf13/afero/.travis.yml | 21 - vendor/github.com/spf13/afero/LICENSE.txt | 174 - vendor/github.com/spf13/afero/README.md | 452 --- vendor/github.com/spf13/afero/afero.go | 108 - vendor/github.com/spf13/afero/appveyor.yml | 15 - vendor/github.com/spf13/afero/basepath.go | 180 - .../github.com/spf13/afero/cacheOnReadFs.go | 290 -- vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 25 - .../github.com/spf13/afero/copyOnWriteFs.go | 293 -- vendor/github.com/spf13/afero/go.mod | 3 - vendor/github.com/spf13/afero/go.sum | 2 - vendor/github.com/spf13/afero/httpFs.go | 110 - vendor/github.com/spf13/afero/ioutil.go | 230 -- vendor/github.com/spf13/afero/lstater.go | 27 - vendor/github.com/spf13/afero/match.go | 110 - vendor/github.com/spf13/afero/mem/dir.go | 37 - vendor/github.com/spf13/afero/mem/dirmap.go | 43 - vendor/github.com/spf13/afero/mem/file.go | 317 -- vendor/github.com/spf13/afero/memmap.go | 365 -- vendor/github.com/spf13/afero/os.go | 101 - vendor/github.com/spf13/afero/path.go | 106 - vendor/github.com/spf13/afero/readonlyfs.go | 80 - vendor/github.com/spf13/afero/regexpfs.go | 214 - vendor/github.com/spf13/afero/unionFile.go | 320 -- vendor/github.com/spf13/afero/util.go | 330 -- vendor/go.opencensus.io/.gitignore | 9 - vendor/go.opencensus.io/.travis.yml | 27 - vendor/go.opencensus.io/AUTHORS | 1 - vendor/go.opencensus.io/CONTRIBUTING.md | 56 - vendor/go.opencensus.io/Gopkg.lock | 231 -- vendor/go.opencensus.io/Gopkg.toml | 36 - vendor/go.opencensus.io/LICENSE | 202 - vendor/go.opencensus.io/README.md | 263 -- vendor/go.opencensus.io/appveyor.yml | 24 - vendor/go.opencensus.io/exemplar/exemplar.go | 78 - vendor/go.opencensus.io/go.mod | 25 - vendor/go.opencensus.io/go.sum | 48 - vendor/go.opencensus.io/internal/internal.go | 37 - vendor/go.opencensus.io/internal/sanitize.go | 50 - .../internal/tagencoding/tagencoding.go | 72 - .../internal/traceinternals.go | 52 - vendor/go.opencensus.io/opencensus.go | 21 - .../go.opencensus.io/plugin/ochttp/client.go | 117 - .../plugin/ochttp/client_stats.go | 135 - vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 - .../plugin/ochttp/propagation/b3/b3.go | 123 - .../propagation/tracecontext/propagation.go | 187 - .../go.opencensus.io/plugin/ochttp/route.go | 51 - .../go.opencensus.io/plugin/ochttp/server.go | 440 --- .../ochttp/span_annotating_client_trace.go | 169 - .../go.opencensus.io/plugin/ochttp/stats.go | 265 -- .../go.opencensus.io/plugin/ochttp/trace.go | 228 -- vendor/go.opencensus.io/stats/doc.go | 69 - .../go.opencensus.io/stats/internal/record.go | 25 - .../stats/internal/validation.go | 28 - vendor/go.opencensus.io/stats/measure.go | 123 - .../go.opencensus.io/stats/measure_float64.go | 36 - .../go.opencensus.io/stats/measure_int64.go | 36 - vendor/go.opencensus.io/stats/record.go | 69 - vendor/go.opencensus.io/stats/units.go | 25 - .../stats/view/aggregation.go | 120 - 
.../stats/view/aggregation_data.go | 235 -- .../go.opencensus.io/stats/view/collector.go | 87 - vendor/go.opencensus.io/stats/view/doc.go | 47 - vendor/go.opencensus.io/stats/view/export.go | 58 - vendor/go.opencensus.io/stats/view/view.go | 185 - vendor/go.opencensus.io/stats/view/worker.go | 229 -- .../stats/view/worker_commands.go | 183 - vendor/go.opencensus.io/tag/context.go | 67 - vendor/go.opencensus.io/tag/doc.go | 26 - vendor/go.opencensus.io/tag/key.go | 35 - vendor/go.opencensus.io/tag/map.go | 197 - vendor/go.opencensus.io/tag/map_codec.go | 234 -- vendor/go.opencensus.io/tag/profile_19.go | 31 - vendor/go.opencensus.io/tag/profile_not19.go | 23 - vendor/go.opencensus.io/tag/validate.go | 56 - vendor/go.opencensus.io/trace/basetypes.go | 114 - vendor/go.opencensus.io/trace/config.go | 48 - vendor/go.opencensus.io/trace/doc.go | 53 - vendor/go.opencensus.io/trace/exemplar.go | 43 - vendor/go.opencensus.io/trace/export.go | 90 - .../trace/internal/internal.go | 21 - .../trace/propagation/propagation.go | 108 - vendor/go.opencensus.io/trace/sampling.go | 75 - vendor/go.opencensus.io/trace/spanbucket.go | 130 - vendor/go.opencensus.io/trace/spanstore.go | 306 -- vendor/go.opencensus.io/trace/status_codes.go | 37 - vendor/go.opencensus.io/trace/trace.go | 516 --- vendor/go.opencensus.io/trace/trace_go11.go | 32 - .../go.opencensus.io/trace/trace_nongo11.go | 25 - .../trace/tracestate/tracestate.go | 147 - vendor/golang.org/x/net/html/atom/gen.go | 712 ---- .../x/net/internal/timeseries/timeseries.go | 525 --- vendor/golang.org/x/net/trace/events.go | 532 --- vendor/golang.org/x/net/trace/histogram.go | 365 -- vendor/golang.org/x/net/trace/trace.go | 1130 ------ vendor/golang.org/x/oauth2/README.md | 51 - .../golang.org/x/oauth2/google/appengine.go | 38 - .../x/oauth2/google/appengine_gen1.go | 77 - .../x/oauth2/google/appengine_gen2_flex.go | 27 - vendor/golang.org/x/oauth2/google/default.go | 155 - vendor/golang.org/x/oauth2/google/doc.go | 40 - vendor/golang.org/x/oauth2/google/google.go | 192 - vendor/golang.org/x/oauth2/google/jwt.go | 74 - vendor/golang.org/x/oauth2/google/sdk.go | 201 - vendor/golang.org/x/oauth2/internal/token.go | 219 +- vendor/golang.org/x/oauth2/jws/jws.go | 182 - vendor/golang.org/x/oauth2/jwt/jwt.go | 162 - vendor/golang.org/x/oauth2/oauth2.go | 47 +- vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sync/AUTHORS | 3 - vendor/golang.org/x/sync/CONTRIBUTORS | 3 - vendor/golang.org/x/sync/PATENTS | 22 - .../golang.org/x/sync/semaphore/semaphore.go | 127 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 54 - vendor/golang.org/x/sys/unix/mkall.sh | 14 +- vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - vendor/golang.org/x/sys/unix/mkerrors.sh | 5 +- vendor/golang.org/x/sys/unix/mkpost.go | 122 - vendor/golang.org/x/sys/unix/mksyscall.go | 407 -- .../x/sys/unix/mksyscall_aix_ppc.go | 415 -- .../x/sys/unix/mksyscall_aix_ppc64.go | 614 --- .../x/sys/unix/mksyscall_solaris.go | 335 -- .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 -- vendor/golang.org/x/sys/unix/mksysnum.go | 190 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 8 +- vendor/golang.org/x/sys/unix/syscall.go | 1 + vendor/golang.org/x/sys/unix/syscall_aix.go | 8 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 16 - .../x/sys/unix/syscall_aix_ppc64.go | 47 - .../golang.org/x/sys/unix/syscall_freebsd.go | 50 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 12 +- .../x/sys/unix/syscall_linux_arm.go | 13 - .../golang.org/x/sys/unix/syscall_netbsd.go | 25 +- 
.../golang.org/x/sys/unix/syscall_openbsd.go | 25 +- vendor/golang.org/x/sys/unix/types_aix.go | 237 -- vendor/golang.org/x/sys/unix/types_darwin.go | 283 -- .../golang.org/x/sys/unix/types_dragonfly.go | 263 -- vendor/golang.org/x/sys/unix/types_freebsd.go | 356 -- vendor/golang.org/x/sys/unix/types_netbsd.go | 289 -- vendor/golang.org/x/sys/unix/types_openbsd.go | 282 -- vendor/golang.org/x/sys/unix/types_solaris.go | 266 -- .../x/sys/unix/zerrors_linux_386.go | 106 - .../x/sys/unix/zerrors_linux_amd64.go | 106 - .../x/sys/unix/zerrors_linux_arm.go | 106 - .../x/sys/unix/zerrors_linux_arm64.go | 106 - .../x/sys/unix/zerrors_linux_mips.go | 106 - .../x/sys/unix/zerrors_linux_mips64.go | 106 - .../x/sys/unix/zerrors_linux_mips64le.go | 106 - .../x/sys/unix/zerrors_linux_mipsle.go | 106 - .../x/sys/unix/zerrors_linux_ppc64.go | 106 - .../x/sys/unix/zerrors_linux_ppc64le.go | 106 - .../x/sys/unix/zerrors_linux_riscv64.go | 106 - .../x/sys/unix/zerrors_linux_s390x.go | 106 - .../x/sys/unix/zerrors_linux_sparc64.go | 106 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 10 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 10 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 4 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 4 +- .../x/sys/unix/zsyscall_freebsd_386.go | 2 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 2 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 2 +- .../x/sys/unix/zsyscall_linux_386.go | 8 +- .../x/sys/unix/zsyscall_linux_amd64.go | 8 +- .../x/sys/unix/zsyscall_linux_arm.go | 23 +- .../x/sys/unix/zsyscall_linux_arm64.go | 8 +- .../x/sys/unix/zsyscall_linux_mips.go | 8 +- .../x/sys/unix/zsyscall_linux_mips64.go | 8 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 8 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 8 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 8 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 8 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 8 +- .../x/sys/unix/zsyscall_linux_s390x.go | 8 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 8 +- .../x/sys/unix/zsyscall_netbsd_386.go | 2 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 2 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 2 +- .../x/sys/unix/zsyscall_openbsd_386.go | 2 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 2 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 2 +- .../x/sys/unix/zsysnum_freebsd_386.go | 23 +- .../x/sys/unix/zsysnum_freebsd_amd64.go | 23 +- .../x/sys/unix/zsysnum_freebsd_arm.go | 23 +- .../x/sys/unix/zsysnum_freebsd_arm64.go | 445 ++- .../x/sys/unix/zsysnum_linux_386.go | 800 ++-- .../x/sys/unix/zsysnum_linux_amd64.go | 4 - .../x/sys/unix/zsysnum_linux_arm.go | 736 ++-- .../x/sys/unix/zsysnum_linux_arm64.go | 4 - .../x/sys/unix/zsysnum_linux_mips.go | 770 ++-- .../x/sys/unix/zsysnum_linux_mips64.go | 4 - .../x/sys/unix/zsysnum_linux_mips64le.go | 4 - .../x/sys/unix/zsysnum_linux_mipsle.go | 770 ++-- .../x/sys/unix/zsysnum_linux_ppc64.go | 15 - .../x/sys/unix/zsysnum_linux_ppc64le.go | 15 - .../x/sys/unix/zsysnum_linux_riscv64.go | 4 - .../x/sys/unix/zsysnum_linux_s390x.go | 18 - .../x/sys/unix/zsysnum_linux_sparc64.go | 19 - .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 45 +- .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 46 +- .../x/sys/unix/ztypes_darwin_386.go | 36 +- .../x/sys/unix/ztypes_darwin_amd64.go | 38 +- .../x/sys/unix/ztypes_darwin_arm.go | 36 +- .../x/sys/unix/ztypes_darwin_arm64.go | 38 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 38 +- 
.../x/sys/unix/ztypes_freebsd_386.go | 82 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 72 +- .../x/sys/unix/ztypes_freebsd_arm.go | 72 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 72 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 169 - .../x/sys/unix/ztypes_linux_amd64.go | 169 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 169 - .../x/sys/unix/ztypes_linux_arm64.go | 169 - .../x/sys/unix/ztypes_linux_mips.go | 169 - .../x/sys/unix/ztypes_linux_mips64.go | 169 - .../x/sys/unix/ztypes_linux_mips64le.go | 169 - .../x/sys/unix/ztypes_linux_mipsle.go | 169 - .../x/sys/unix/ztypes_linux_ppc64.go | 169 - .../x/sys/unix/ztypes_linux_ppc64le.go | 169 - .../x/sys/unix/ztypes_linux_riscv64.go | 169 - .../x/sys/unix/ztypes_linux_s390x.go | 169 - .../x/sys/unix/ztypes_linux_sparc64.go | 169 - .../x/sys/unix/ztypes_netbsd_386.go | 34 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 40 +- .../x/sys/unix/ztypes_netbsd_arm.go | 40 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 40 +- .../golang.org/x/sys/windows/env_windows.go | 34 +- vendor/golang.org/x/sys/windows/mkerrors.bash | 5 +- vendor/golang.org/x/sys/windows/mkerrors.go | 7 + .../x/sys/windows/mkknownfolderids.bash | 27 - vendor/golang.org/x/sys/windows/mksyscall.go | 2 - .../x/sys/windows/security_windows.go | 235 +- vendor/golang.org/x/sys/windows/service.go | 70 +- .../x/sys/windows/syscall_windows.go | 97 +- .../golang.org/x/sys/windows/types_windows.go | 218 +- .../x/sys/windows/zerrors_windows.go | 2 +- .../x/sys/windows/zknownfolderids_windows.go | 149 - .../x/sys/windows/zsyscall_windows.go | 566 +-- .../x/text/encoding/charmap/maketables.go | 556 --- .../x/text/encoding/htmlindex/gen.go | 173 - .../text/encoding/internal/identifier/gen.go | 142 - .../x/text/encoding/japanese/maketables.go | 161 - .../x/text/encoding/korean/maketables.go | 143 - .../encoding/simplifiedchinese/maketables.go | 161 - .../encoding/traditionalchinese/maketables.go | 140 - .../x/text/internal/language/compact/gen.go | 64 - .../internal/language/compact/gen_index.go | 113 - .../internal/language/compact/gen_parents.go | 54 - .../x/text/internal/language/gen.go | 1520 -------- .../x/text/internal/language/gen_common.go | 20 - vendor/golang.org/x/text/language/gen.go | 305 -- vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - .../x/text/unicode/norm/maketables.go | 986 ----- .../golang.org/x/text/unicode/norm/triegen.go | 117 - vendor/golang.org/x/time/rate/rate.go | 16 +- vendor/golang.org/x/time/rate/rate_go16.go | 21 + vendor/golang.org/x/time/rate/rate_go17.go | 21 + .../x/tools/go/gcexportdata/main.go | 99 - .../golang.org/x/tools/go/internal/cgo/cgo.go | 220 -- .../x/tools/go/internal/cgo/cgo_pkgconfig.go | 39 - .../x/tools/go/internal/gcimporter/bexport.go | 4 +- .../x/tools/go/internal/gcimporter/bimport.go | 98 +- .../x/tools/go/internal/gcimporter/iexport.go | 723 ++++ .../x/tools/go/internal/gcimporter/iimport.go | 14 +- vendor/golang.org/x/tools/go/packages/doc.go | 6 +- .../x/tools/go/packages/external.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 135 +- .../x/tools/go/packages/golist_fallback.go | 450 --- .../go/packages/golist_fallback_testmain.go | 318 -- .../x/tools/go/packages/golist_overlay.go | 36 +- .../x/tools/go/packages/packages.go | 235 +- vendor/golang.org/x/tools/imports/fix.go | 38 +- vendor/golang.org/x/tools/imports/mkindex.go | 173 - vendor/golang.org/x/tools/imports/mkstdlib.go | 111 - vendor/golang.org/x/tools/imports/mod.go | 8 +- 
vendor/golang.org/x/tools/imports/zstdlib.go | 61 +- .../jsonpatch/v2}/LICENSE | 0 vendor/gomodules.xyz/jsonpatch/v2/go.mod | 9 + vendor/gomodules.xyz/jsonpatch/v2/go.sum | 11 + .../jsonpatch/v2}/jsonpatch.go | 8 +- vendor/google.golang.org/api/AUTHORS | 10 - vendor/google.golang.org/api/CONTRIBUTORS | 55 - vendor/google.golang.org/api/LICENSE | 27 - .../api/support/bundler/bundler.go | 349 -- .../google.golang.org/appengine/.travis.yml | 20 - .../appengine/CONTRIBUTING.md | 90 - vendor/google.golang.org/appengine/README.md | 73 - .../google.golang.org/appengine/appengine.go | 137 - .../appengine/appengine_vm.go | 20 - vendor/google.golang.org/appengine/errors.go | 46 - vendor/google.golang.org/appengine/go.mod | 7 - vendor/google.golang.org/appengine/go.sum | 6 - .../google.golang.org/appengine/identity.go | 142 - .../app_identity/app_identity_service.pb.go | 611 --- .../app_identity/app_identity_service.proto | 64 - .../internal/modules/modules_service.pb.go | 786 ---- .../internal/modules/modules_service.proto | 80 - .../google.golang.org/appengine/namespace.go | 25 - vendor/google.golang.org/appengine/timeout.go | 20 - .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - vendor/google.golang.org/genproto/LICENSE | 202 - .../googleapis/rpc/status/status.pb.go | 156 - vendor/google.golang.org/grpc/.travis.yml | 37 - vendor/google.golang.org/grpc/AUTHORS | 1 - vendor/google.golang.org/grpc/CONTRIBUTING.md | 36 - vendor/google.golang.org/grpc/LICENSE | 202 - vendor/google.golang.org/grpc/Makefile | 60 - vendor/google.golang.org/grpc/README.md | 67 - vendor/google.golang.org/grpc/backoff.go | 38 - vendor/google.golang.org/grpc/balancer.go | 391 -- .../grpc/balancer/balancer.go | 303 -- .../grpc/balancer/base/balancer.go | 171 - .../grpc/balancer/base/base.go | 64 - .../grpc/balancer/roundrobin/roundrobin.go | 79 - .../grpc/balancer_conn_wrappers.go | 328 -- .../grpc/balancer_v1_wrapper.go | 326 -- .../grpc_binarylog_v1/binarylog.pb.go | 900 ----- vendor/google.golang.org/grpc/call.go | 74 - vendor/google.golang.org/grpc/clientconn.go | 1449 ------- vendor/google.golang.org/grpc/codec.go | 50 - vendor/google.golang.org/grpc/codegen.sh | 17 - .../grpc/codes/code_string.go | 62 - vendor/google.golang.org/grpc/codes/codes.go | 197 - .../grpc/connectivity/connectivity.go | 73 - .../grpc/credentials/credentials.go | 328 -- .../grpc/credentials/internal/syscallconn.go | 61 - .../internal/syscallconn_appengine.go | 30 - vendor/google.golang.org/grpc/dialoptions.go | 492 --- vendor/google.golang.org/grpc/doc.go | 24 - .../grpc/encoding/encoding.go | 118 - .../grpc/encoding/proto/proto.go | 110 - vendor/google.golang.org/grpc/go.mod | 20 - vendor/google.golang.org/grpc/go.sum | 32 - .../google.golang.org/grpc/grpclog/grpclog.go | 126 - .../google.golang.org/grpc/grpclog/logger.go | 85 - .../grpc/grpclog/loggerv2.go | 195 - vendor/google.golang.org/grpc/install_gae.sh | 6 - vendor/google.golang.org/grpc/interceptor.go | 77 - .../grpc/internal/backoff/backoff.go | 78 - .../grpc/internal/binarylog/binarylog.go | 167 - .../internal/binarylog/binarylog_testutil.go | 42 - .../grpc/internal/binarylog/env_config.go | 210 - .../grpc/internal/binarylog/method_logger.go | 426 -- .../grpc/internal/binarylog/regenerate.sh | 33 - .../grpc/internal/binarylog/sink.go | 162 - .../grpc/internal/binarylog/util.go | 41 - .../grpc/internal/channelz/funcs.go | 699 ---- .../grpc/internal/channelz/types.go | 702 ---- .../grpc/internal/channelz/types_linux.go | 53 - 
.../grpc/internal/channelz/types_nonlinux.go | 44 - .../grpc/internal/channelz/util_linux.go | 39 - .../grpc/internal/channelz/util_nonlinux.go | 26 - .../grpc/internal/envconfig/envconfig.go | 70 - .../grpc/internal/grpcrand/grpcrand.go | 56 - .../grpc/internal/grpcsync/event.go | 61 - .../grpc/internal/internal.go | 50 - .../grpc/internal/syscall/syscall_linux.go | 114 - .../grpc/internal/syscall/syscall_nonlinux.go | 63 - .../grpc/internal/transport/bdp_estimator.go | 141 - .../grpc/internal/transport/controlbuf.go | 852 ---- .../grpc/internal/transport/defaults.go | 49 - .../grpc/internal/transport/flowcontrol.go | 218 -- .../grpc/internal/transport/handler_server.go | 449 --- .../grpc/internal/transport/http2_client.go | 1380 ------- .../grpc/internal/transport/http2_server.go | 1180 ------ .../grpc/internal/transport/http_util.go | 623 --- .../grpc/internal/transport/log.go | 44 - .../grpc/internal/transport/transport.go | 758 ---- .../grpc/keepalive/keepalive.go | 83 - .../grpc/metadata/metadata.go | 209 - .../grpc/naming/dns_resolver.go | 293 -- .../google.golang.org/grpc/naming/naming.go | 69 - vendor/google.golang.org/grpc/peer/peer.go | 51 - .../google.golang.org/grpc/picker_wrapper.go | 180 - vendor/google.golang.org/grpc/pickfirst.go | 110 - vendor/google.golang.org/grpc/proxy.go | 152 - .../grpc/resolver/dns/dns_resolver.go | 436 --- .../grpc/resolver/passthrough/passthrough.go | 57 - .../grpc/resolver/resolver.go | 158 - .../grpc/resolver_conn_wrapper.go | 155 - vendor/google.golang.org/grpc/rpc_util.go | 816 ---- vendor/google.golang.org/grpc/server.go | 1486 ------- .../google.golang.org/grpc/service_config.go | 372 -- .../google.golang.org/grpc/stats/handlers.go | 63 - vendor/google.golang.org/grpc/stats/stats.go | 295 -- .../google.golang.org/grpc/status/status.go | 210 - vendor/google.golang.org/grpc/stream.go | 1486 ------- vendor/google.golang.org/grpc/tap/tap.go | 51 - vendor/google.golang.org/grpc/trace.go | 113 - vendor/google.golang.org/grpc/version.go | 22 - vendor/google.golang.org/grpc/vet.sh | 141 - vendor/gopkg.in/fsnotify.v1/.editorconfig | 5 + vendor/gopkg.in/fsnotify.v1/.gitignore | 6 + vendor/gopkg.in/fsnotify.v1/.travis.yml | 30 + vendor/gopkg.in/fsnotify.v1/AUTHORS | 52 + vendor/gopkg.in/fsnotify.v1/CHANGELOG.md | 317 ++ vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 + .../x/sync => gopkg.in/fsnotify.v1}/LICENSE | 3 +- vendor/gopkg.in/fsnotify.v1/README.md | 79 + vendor/gopkg.in/fsnotify.v1/fen.go | 37 + vendor/gopkg.in/fsnotify.v1/fsnotify.go | 66 + vendor/gopkg.in/fsnotify.v1/inotify.go | 337 ++ vendor/gopkg.in/fsnotify.v1/inotify_poller.go | 187 + vendor/gopkg.in/fsnotify.v1/kqueue.go | 521 +++ vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 + .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 + vendor/gopkg.in/fsnotify.v1/windows.go | 561 +++ vendor/gopkg.in/tomb.v1/LICENSE | 29 + vendor/gopkg.in/tomb.v1/README.md | 4 + vendor/gopkg.in/tomb.v1/tomb.go | 176 + vendor/k8s.io/api/admission/v1beta1/doc.go | 1 + .../v1alpha1/generated.proto | 107 - .../admissionregistration/v1alpha1/types.go | 106 - .../v1alpha1/types_swagger_doc_generated.go | 71 - .../api/admissionregistration/v1beta1/doc.go | 3 +- .../v1beta1/generated.pb.go | 242 +- .../v1beta1/generated.proto | 31 + .../admissionregistration/v1beta1/types.go | 43 + .../v1beta1/types_swagger_doc_generated.go | 17 +- .../v1beta1/zz_generated.deepcopy.go | 15 + vendor/k8s.io/api/apps/v1/doc.go | 1 + vendor/k8s.io/api/apps/v1/types.go | 2 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 1 + 
.../k8s.io/api/apps/v1beta1/generated.proto | 2 +- vendor/k8s.io/api/apps/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/apps/v1beta2/doc.go | 1 + .../k8s.io/api/apps/v1beta2/generated.proto | 28 +- vendor/k8s.io/api/apps/v1beta2/types.go | 32 +- .../v1beta2/types_swagger_doc_generated.go | 28 +- .../api/auditregistration/v1alpha1/doc.go | 1 + vendor/k8s.io/api/authentication/v1/doc.go | 1 + .../api/authentication/v1/generated.proto | 5 +- vendor/k8s.io/api/authentication/v1/types.go | 5 +- .../v1/types_swagger_doc_generated.go | 2 +- .../k8s.io/api/authentication/v1beta1/doc.go | 1 + vendor/k8s.io/api/authorization/v1/doc.go | 1 + .../k8s.io/api/authorization/v1beta1/doc.go | 1 + vendor/k8s.io/api/autoscaling/v1/doc.go | 1 + vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 1 + vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 1 + vendor/k8s.io/api/batch/v1/doc.go | 1 + vendor/k8s.io/api/batch/v1beta1/doc.go | 1 + vendor/k8s.io/api/batch/v2alpha1/doc.go | 1 + vendor/k8s.io/api/certificates/v1beta1/doc.go | 1 + vendor/k8s.io/api/coordination/v1/doc.go | 23 + .../v1}/generated.pb.go | 552 +-- .../api/coordination/v1/generated.proto | 80 + .../v1alpha1 => coordination/v1}/register.go | 16 +- vendor/k8s.io/api/coordination/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 63 + .../coordination/v1/zz_generated.deepcopy.go | 124 + vendor/k8s.io/api/coordination/v1beta1/doc.go | 1 + .../api/core/v1/annotation_key_constants.go | 6 + vendor/k8s.io/api/core/v1/doc.go | 1 + vendor/k8s.io/api/core/v1/generated.pb.go | 3450 ++++++++++------- vendor/k8s.io/api/core/v1/generated.proto | 73 +- vendor/k8s.io/api/core/v1/types.go | 75 +- .../core/v1/types_swagger_doc_generated.go | 26 +- .../k8s.io/api/core/v1/well_known_labels.go | 36 + .../api/core/v1/zz_generated.deepcopy.go | 43 + vendor/k8s.io/api/events/v1beta1/doc.go | 1 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 1 + .../api/extensions/v1beta1/generated.pb.go | 735 ++-- .../api/extensions/v1beta1/generated.proto | 54 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 56 +- .../v1beta1/types_swagger_doc_generated.go | 54 +- .../v1beta1/zz_generated.deepcopy.go | 21 + vendor/k8s.io/api/networking/v1/doc.go | 1 + .../k8s.io/api/networking/v1/generated.proto | 6 +- vendor/k8s.io/api/networking/v1/types.go | 6 +- .../v1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/networking/v1beta1/doc.go | 22 + .../api/networking/v1beta1/generated.pb.go | 1953 ++++++++++ .../api/networking/v1beta1/generated.proto | 186 + .../k8s.io/api/networking/v1beta1/register.go | 56 + vendor/k8s.io/api/networking/v1beta1/types.go | 192 + .../v1beta1/types_swagger_doc_generated.go | 127 + .../v1beta1/zz_generated.deepcopy.go | 252 ++ vendor/k8s.io/api/node/v1alpha1/doc.go | 22 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 696 ++++ .../k8s.io/api/node/v1alpha1/generated.proto | 76 + vendor/k8s.io/api/node/v1alpha1/register.go | 52 + vendor/k8s.io/api/node/v1alpha1/types.go | 75 + .../v1alpha1/types_swagger_doc_generated.go | 59 + .../v1alpha1/zz_generated.deepcopy.go | 76 +- vendor/k8s.io/api/node/v1beta1/doc.go | 22 + .../k8s.io/api/node/v1beta1/generated.pb.go | 564 +++ .../k8s.io/api/node/v1beta1/generated.proto | 66 + vendor/k8s.io/api/node/v1beta1/register.go | 52 + vendor/k8s.io/api/node/v1beta1/types.go | 65 + .../v1beta1/types_swagger_doc_generated.go | 50 + .../api/node/v1beta1/zz_generated.deepcopy.go | 84 + vendor/k8s.io/api/policy/v1beta1/doc.go | 1 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 435 
++- .../k8s.io/api/policy/v1beta1/generated.proto | 11 + vendor/k8s.io/api/policy/v1beta1/types.go | 10 + .../v1beta1/types_swagger_doc_generated.go | 10 + .../policy/v1beta1/zz_generated.deepcopy.go | 21 + vendor/k8s.io/api/rbac/v1/doc.go | 1 + vendor/k8s.io/api/rbac/v1/generated.proto | 2 + vendor/k8s.io/api/rbac/v1/types.go | 2 + vendor/k8s.io/api/rbac/v1alpha1/doc.go | 1 + .../k8s.io/api/rbac/v1alpha1/generated.proto | 2 + vendor/k8s.io/api/rbac/v1alpha1/types.go | 2 + vendor/k8s.io/api/rbac/v1beta1/doc.go | 1 + .../k8s.io/api/rbac/v1beta1/generated.proto | 2 + vendor/k8s.io/api/rbac/v1beta1/types.go | 2 + vendor/k8s.io/api/scheduling/v1/doc.go | 23 + .../k8s.io/api/scheduling/v1/generated.pb.go | 621 +++ .../k8s.io/api/scheduling/v1/generated.proto | 67 + vendor/k8s.io/api/scheduling/v1/register.go | 55 + vendor/k8s.io/api/scheduling/v1/types.go | 66 + .../v1/types_swagger_doc_generated.go | 52 + .../scheduling/v1/zz_generated.deepcopy.go | 84 + vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 1 + .../api/scheduling/v1alpha1/generated.proto | 1 + .../k8s.io/api/scheduling/v1alpha1/types.go | 1 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/scheduling/v1beta1/doc.go | 1 + .../api/scheduling/v1beta1/generated.proto | 5 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 5 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/settings/v1alpha1/doc.go | 1 + .../api/settings/v1alpha1/generated.proto | 2 +- vendor/k8s.io/api/settings/v1alpha1/types.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/storage/v1/doc.go | 3 +- vendor/k8s.io/api/storage/v1/generated.proto | 2 +- vendor/k8s.io/api/storage/v1/types.go | 2 +- .../storage/v1/types_swagger_doc_generated.go | 2 +- vendor/k8s.io/api/storage/v1alpha1/doc.go | 3 +- vendor/k8s.io/api/storage/v1beta1/doc.go | 1 + .../api/storage/v1beta1/generated.pb.go | 1438 ++++++- .../api/storage/v1beta1/generated.proto | 139 +- vendor/k8s.io/api/storage/v1beta1/register.go | 6 + vendor/k8s.io/api/storage/v1beta1/types.go | 159 +- .../v1beta1/types_swagger_doc_generated.go | 72 +- .../storage/v1beta1/zz_generated.deepcopy.go | 190 + .../pkg/apis/apiextensions/helpers.go | 127 +- .../pkg/apis/apiextensions/types.go | 9 + .../apis/apiextensions/types_jsonschema.go | 1 + .../apis/apiextensions/v1beta1/defaults.go | 3 + .../pkg/apis/apiextensions/v1beta1/doc.go | 1 + .../apiextensions/v1beta1/generated.pb.go | 431 +- .../apiextensions/v1beta1/generated.proto | 12 + .../pkg/apis/apiextensions/v1beta1/types.go | 10 + .../apiextensions/v1beta1/types_jsonschema.go | 9 +- .../v1beta1/zz_generated.conversion.go | 4 + .../v1beta1/zz_generated.deepcopy.go | 5 + .../apiextensions/zz_generated.deepcopy.go | 5 + .../client/clientset}/clientset/clientset.go | 26 +- .../pkg/client/clientset}/clientset/doc.go | 0 .../client/clientset}/clientset/scheme/doc.go | 0 .../clientset}/clientset/scheme/register.go | 4 +- .../v1beta1/apiextensions_client.go | 90 + .../v1beta1/customresourcedefinition.go | 180 + .../typed/apiextensions/v1beta1}/doc.go | 2 +- .../v1beta1/generated_expansion.go} | 5 +- .../apimachinery/pkg/api/equality/semantic.go | 49 + .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 2 + .../apimachinery/pkg/api/errors/errors.go | 14 + .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 2 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 13 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 4 +- .../apimachinery/pkg/api/resource/OWNERS | 2 + .../apimachinery/pkg/api/resource/quantity.go | 2 +- 
.../apimachinery/pkg/apis/config/OWNERS | 7 - .../apimachinery/pkg/apis/config/types.go | 33 - .../apis/meta/internalversion/conversion.go | 2 - .../pkg/apis/meta/internalversion/doc.go | 2 +- .../pkg/apis/meta/internalversion/types.go | 3 - .../zz_generated.conversion.go | 2 - .../apimachinery/pkg/apis/meta/v1/OWNERS | 2 + .../apimachinery/pkg/apis/meta/v1/duration.go | 10 + .../pkg/apis/meta/v1/generated.pb.go | 1505 +++++-- .../pkg/apis/meta/v1/generated.proto | 127 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 33 + .../apimachinery/pkg/apis/meta/v1/meta.go | 10 +- .../apimachinery/pkg/apis/meta/v1/register.go | 2 + .../apimachinery/pkg/apis/meta/v1/types.go | 139 +- .../meta/v1/types_swagger_doc_generated.go | 100 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 3 + .../apis/meta/v1/unstructured/unstructured.go | 50 +- .../pkg/apis/meta/v1/validation/validation.go | 58 +- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 95 + .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 2 +- .../apimachinery/pkg/runtime/converter.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 1 + vendor/k8s.io/apimachinery/pkg/types/patch.go | 1 + .../apimachinery/pkg/util/errors/errors.go | 38 +- .../apimachinery/pkg/util/mergepatch/OWNERS | 2 + .../k8s.io/apimachinery/pkg/util/net/http.go | 9 +- .../pkg/util/strategicpatch/OWNERS | 2 + .../pkg/util/validation/validation.go | 13 +- .../apimachinery/pkg/util/version/version.go | 29 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 134 +- .../apimachinery/pkg/util/yaml/decoder.go | 2 - .../third_party/forked/golang/json/OWNERS | 2 + .../client-go/discovery/cached_discovery.go | 295 -- .../client-go/discovery/discovery_client.go | 96 +- .../client-go/discovery/round_tripper.go | 62 - vendor/k8s.io/client-go/dynamic/interface.go | 2 +- vendor/k8s.io/client-go/dynamic/simple.go | 2 +- .../k8s.io/client-go/kubernetes/clientset.go | 294 +- .../client-go/kubernetes/scheme/register.go | 14 +- .../v1alpha1/initializerconfiguration.go | 164 - .../coordination/v1/coordination_client.go | 90 + .../kubernetes/typed/coordination/v1}/doc.go | 4 +- .../coordination/v1/generated_expansion.go | 21 + .../kubernetes/typed/coordination/v1/lease.go | 174 + .../typed/networking/v1beta1/doc.go | 20 + .../networking/v1beta1/generated_expansion.go | 21 + .../typed/networking/v1beta1/ingress.go} | 112 +- .../networking/v1beta1/networking_client.go | 90 + .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go | 2 +- .../v1alpha1/node_client.go} | 32 +- .../typed/node/v1alpha1/runtimeclass.go | 164 + .../kubernetes/typed/node/v1beta1/doc.go | 20 + .../typed/node/v1beta1/generated_expansion.go | 21 + .../typed/node/v1beta1/node_client.go | 90 + .../typed/node/v1beta1/runtimeclass.go | 164 + .../kubernetes/typed/scheduling/v1/doc.go | 20 + .../scheduling/v1/generated_expansion.go | 21 + .../typed/scheduling/v1/priorityclass.go | 164 + .../typed/scheduling/v1/scheduling_client.go | 90 + .../typed/storage/v1beta1/csidriver.go | 164 + .../typed/storage/v1beta1/csinode.go | 164 + .../storage/v1beta1/generated_expansion.go | 4 + .../typed/storage/v1beta1/storage_client.go | 10 + .../pkg/apis/clientauthentication/OWNERS | 2 + .../clientauthentication/v1alpha1/types.go | 2 +- .../client-go/plugin/pkg/client/auth/OWNERS | 7 - .../plugin/pkg/client/auth/azure/README.md | 50 - .../plugin/pkg/client/auth/azure/azure.go | 360 -- .../plugin/pkg/client/auth/exec/exec.go | 15 +- .../plugin/pkg/client/auth/gcp/OWNERS | 6 - .../plugin/pkg/client/auth/gcp/gcp.go | 383 -- 
.../plugin/pkg/client/auth/oidc/OWNERS | 5 - .../plugin/pkg/client/auth/oidc/oidc.go | 379 -- .../pkg/client/auth/openstack/openstack.go | 193 - .../plugin/pkg/client/auth/plugins.go | 25 - vendor/k8s.io/client-go/rest/OWNERS | 2 + vendor/k8s.io/client-go/rest/config.go | 109 +- vendor/k8s.io/client-go/rest/request.go | 6 +- vendor/k8s.io/client-go/rest/transport.go | 17 +- .../restmapper/category_expansion.go | 2 +- .../k8s.io/client-go/restmapper/discovery.go | 27 +- .../forked/golang/template/exec.go | 94 - .../forked/golang/template/funcs.go | 599 --- vendor/k8s.io/client-go/tools/auth/OWNERS | 2 + vendor/k8s.io/client-go/tools/cache/OWNERS | 3 +- .../client-go/tools/cache/controller.go | 72 +- .../client-go/tools/cache/delta_fifo.go | 6 +- .../client-go/tools/cache/expiration_cache.go | 14 +- .../tools/cache/fake_custom_store.go | 2 +- .../k8s.io/client-go/tools/cache/listers.go | 20 + .../k8s.io/client-go/tools/cache/listwatch.go | 14 +- .../k8s.io/client-go/tools/cache/reflector.go | 92 +- .../client-go/tools/cache/shared_informer.go | 2 +- .../tools/cache/thread_safe_store.go | 19 +- .../client-go/tools/clientcmd/api/types.go | 44 + .../tools/clientcmd/client_config.go | 7 +- .../tools/clientcmd/merged_client_builder.go | 7 +- .../client-go/tools/leaderelection/OWNERS | 2 + .../tools/leaderelection/leaderelection.go | 47 +- .../client-go/tools/leaderelection/metrics.go | 109 + .../resourcelock/configmaplock.go | 3 + .../resourcelock/endpointslock.go | 3 + .../leaderelection/resourcelock/interface.go | 36 +- .../leaderelection/resourcelock/leaselock.go | 124 + vendor/k8s.io/client-go/tools/metrics/OWNERS | 2 + vendor/k8s.io/client-go/tools/record/OWNERS | 2 + vendor/k8s.io/client-go/tools/record/event.go | 28 +- .../client-go/tools/record/util/util.go | 44 + vendor/k8s.io/client-go/transport/OWNERS | 2 + vendor/k8s.io/client-go/transport/config.go | 20 +- .../client-go/transport/round_trippers.go | 39 +- .../{rest => transport}/token_source.go | 14 +- .../k8s.io/client-go/transport/transport.go | 58 + vendor/k8s.io/client-go/util/cert/OWNERS | 2 + vendor/k8s.io/client-go/util/cert/cert.go | 71 +- vendor/k8s.io/client-go/util/cert/io.go | 95 - vendor/k8s.io/client-go/util/cert/pem.go | 208 - .../client-go/util/flowcontrol/backoff.go | 2 +- .../client-go/util/jsonpath/jsonpath.go | 517 --- vendor/k8s.io/client-go/util/jsonpath/node.go | 255 -- .../k8s.io/client-go/util/jsonpath/parser.go | 525 --- vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 + vendor/k8s.io/client-go/util/keyutil/key.go | 323 ++ vendor/k8s.io/client-go/util/retry/OWNERS | 2 + .../util/workqueue/default_rate_limiters.go | 4 +- .../util/workqueue/delaying_queue.go | 17 +- .../client-go/util/workqueue/metrics.go | 116 +- ...itting_queue.go => rate_limiting_queue.go} | 0 .../code-generator/cmd/client-gen/OWNERS | 2 + .../code-generator/cmd/client-gen/README.md | 2 +- .../client-gen/generators/client_generator.go | 27 +- .../fake/generator_fake_for_clientset.go | 4 - .../generators/generator_for_clientset.go | 16 +- .../cmd/client-gen/types/helpers.go | 4 +- .../cmd/client-gen/types/types.go | 7 +- .../code-generator/cmd/conversion-gen/main.go | 63 +- .../cmd/informer-gen/generators/factory.go | 4 +- .../cmd/informer-gen/generators/generic.go | 8 +- .../code-generator/pkg/namer/tag-override.go | 58 + .../cli/flag/ciphersuites_flag.go | 106 - .../colon_separated_multimap_string_string.go | 102 - .../cli/flag/configuration_map.go | 53 - .../k8s.io/component-base/cli/flag/flags.go | 54 - 
.../langle_separated_map_string_string.go | 82 - .../cli/flag/map_string_bool.go | 90 - .../cli/flag/map_string_string.go | 112 - .../cli/flag/namedcertkey_flag.go | 113 - .../component-base/cli/flag/omitempty.go | 24 - .../component-base/cli/flag/sectioned.go | 79 - .../component-base/cli/flag/string_flag.go | 56 - .../component-base/cli/flag/tristate.go | 83 - vendor/k8s.io/component-base/config/OWNERS | 14 + .../pkg/apis => component-base}/config/doc.go | 2 +- vendor/k8s.io/component-base/config/types.go | 74 + .../config/zz_generated.deepcopy.go | 35 + .../deepcopy-gen/generators/deepcopy.go | 22 +- .../gengo/examples/set-gen/generators/sets.go | 6 +- .../gengo/examples/set-gen/sets/byte.go | 6 +- .../k8s.io/gengo/examples/set-gen/sets/int.go | 6 +- .../gengo/examples/set-gen/sets/int64.go | 6 +- .../gengo/examples/set-gen/sets/string.go | 6 +- vendor/k8s.io/klog/klogr/README.md | 8 + vendor/k8s.io/klog/klogr/klogr.go | 194 + .../kube-openapi/pkg/util/proto/document.go | 8 +- .../kube-openapi/pkg/util/proto/openapi.go | 2 - .../cmd/kubeadm/app/apis/kubeadm/types.go | 27 +- .../app/apis/kubeadm/v1beta1/defaults.go | 8 - .../app/apis/kubeadm/v1beta1/defaults_unix.go | 2 - .../apis/kubeadm/v1beta1/defaults_windows.go | 2 - .../kubeadm/app/apis/kubeadm/v1beta1/doc.go | 4 +- .../kubeadm/app/apis/kubeadm/v1beta1/types.go | 18 +- .../kubeadm/v1beta1/zz_generated.defaults.go | 2 - .../cmd/kubeadm/app/constants/BUILD | 6 +- .../cmd/kubeadm/app/constants/constants.go | 60 +- .../kubeadm/app/constants/constants_unix.go | 24 + .../app/constants/constants_windows.go | 24 + .../kubernetes/cmd/kubeadm/app/util/BUILD | 2 + .../cmd/kubeadm/app/util/arguments.go | 26 +- .../cmd/kubeadm/app/util/cgroupdriver.go | 9 +- .../cmd/kubeadm/app/util/endpoint.go | 38 +- .../kubernetes/cmd/kubeadm/app/util/error.go | 4 +- .../cmd/kubeadm/app/util/marshal.go | 16 +- .../cmd/kubeadm/app/util/version.go | 35 +- vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS | 2 + .../pkg/apis/core/annotation_key_constants.go | 6 + .../k8s.io/kubernetes/pkg/apis/core/types.go | 64 +- .../pkg/apis/core/zz_generated.deepcopy.go | 43 + .../kubernetes/pkg/kubelet/apis/config/OWNERS | 2 + .../pkg/kubelet/apis/config/types.go | 6 +- .../kubernetes/pkg/proxy/apis/config/BUILD | 2 +- .../kubernetes/pkg/proxy/apis/config/OWNERS | 4 + .../kubernetes/pkg/proxy/apis/config/types.go | 23 +- .../apis/config/zz_generated.deepcopy.go | 17 + .../util => utils}/buffer/ring_growing.go | 0 .../util => utils}/integer/integer.go | 6 + vendor/k8s.io/utils/pointer/OWNERS | 10 + vendor/k8s.io/utils/pointer/pointer.go | 86 + vendor/k8s.io/utils/trace/trace.go | 96 + vendor/modules.txt | 583 ++- .../.github/ISSUE_TEMPLATE/bug_report.md | 25 + .../.github/ISSUE_TEMPLATE/feature_request.md | 14 + .../.github/PULL_REQUEST_TEMPLATE.md | 25 + vendor/sigs.k8s.io/cluster-api/.gitignore | 48 + vendor/sigs.k8s.io/cluster-api/.golangci.yml | 26 + vendor/sigs.k8s.io/cluster-api/BUILD.bazel | 24 + .../sigs.k8s.io/cluster-api/CONTRIBUTING.md | 52 +- vendor/sigs.k8s.io/cluster-api/Dockerfile | 13 +- vendor/sigs.k8s.io/cluster-api/Gopkg.toml | 41 - vendor/sigs.k8s.io/cluster-api/Makefile | 212 +- vendor/sigs.k8s.io/cluster-api/PROJECT | 17 +- vendor/sigs.k8s.io/cluster-api/README.md | 37 +- .../cluster-api/api/v1alpha2/BUILD.bazel | 31 + .../api/v1alpha2/cluster_phase_types.go | 55 + .../cluster-api/api/v1alpha2/cluster_types.go | 157 + .../cluster-api/api/v1alpha2/common_types.go | 128 + .../cluster-api/api/v1alpha2/defaults.go | 73 + .../api/v1alpha2/groupversion_info.go | 
36 + .../api/v1alpha2/machine_phase_types.go | 64 + .../cluster-api/api/v1alpha2/machine_types.go | 223 ++ .../api/v1alpha2/machinedeployment_types.go | 200 + .../api/v1alpha2/machineset_types.go | 223 ++ .../api/v1alpha2/zz_generated.deepcopy.go | 705 ++++ .../build/run_in_workspace_with_goroot.bzl | 9 - .../cluster-api/cmd/clusterctl/BUILD.bazel | 6 +- .../cluster-api/cmd/clusterctl/README.md | 16 + .../cmd/clusterctl/clientcmd/BUILD.bazel | 2 +- .../cmd/clusterctl/clientcmd/configutil.go | 26 +- .../clusterctl/clusterdeployer/BUILD.bazel | 4 +- .../bootstrap/kind/BUILD.bazel | 1 + .../clusterdeployer/bootstrap/kind/kind.go | 5 +- .../bootstrap/minikube/BUILD.bazel | 1 + .../bootstrap/minikube/minikube.go | 13 +- .../clusterdeployer/clusterclient/BUILD.bazel | 4 +- .../clusterclient/clusterclient.go | 553 +-- .../clusterdeployer/clusterdeployer.go | 48 +- .../clusterdeployer/clusterdeployer_test.go | 127 +- .../clusterdeployer/provider/BUILD.bazel | 5 +- .../clusterdeployer/provider/provider.go | 10 - .../cmd/clusterctl/cmd/BUILD.bazel | 12 +- .../cmd/alpha_phase_get_kubeconfig.go | 9 +- .../cmd/clusterctl/cmd/create_cluster.go | 23 +- .../cmd/clusterctl/cmd/create_cluster_test.go | 41 - .../cmd/clusterctl/cmd/validate_cluster.go | 4 +- .../cluster-api/cmd/clusterctl/main.go | 7 +- .../cmd/clusterctl/phases/BUILD.bazel | 5 +- .../cmd/clusterctl/phases/applycluster.go | 2 +- .../phases/applyclusterapicomponents.go | 2 +- .../cmd/clusterctl/phases/applymachines.go | 2 +- .../cmd/clusterctl/phases/getkubeconfig.go | 19 +- .../cmd/clusterctl/phases/pivot.go | 87 +- .../cmd/clusterctl/phases/pivot_test.go | 74 +- .../clusterctl/providercomponents/BUILD.bazel | 1 - .../providercomponents_test.go | 3 +- ...create-cluster-no-args-invalid-flag.golden | 3 +- .../testdata/create-cluster-no-args.golden | 7 +- .../create-no-args-invalid-flag.golden | 2 +- .../clusterctl/testdata/create-no-args.golden | 2 +- ...delete-cluster-no-args-invalid-flag.golden | 2 +- .../testdata/delete-cluster-no-args.golden | 2 +- .../delete-no-args-invalid-flag.golden | 2 +- .../clusterctl/testdata/delete-no-args.golden | 2 +- .../testdata/no-args-invalid-flag.golden | 2 +- .../cmd/clusterctl/testdata/no-args.golden | 2 +- ...lidate-cluster-no-args-invalid-flag.golden | 2 +- .../validate-no-args-invalid-flag.golden | 2 +- .../testdata/validate-no-args.golden | 2 +- .../cmd/clusterctl/validation/BUILD.bazel | 11 +- .../validate_cluster_api_objects.go | 38 +- ...validate_cluster_api_objects_suite_test.go | 6 +- .../validate_cluster_api_objects_test.go | 105 +- .../clusterctl/validation/validate_pods.go | 4 +- .../cmd/example-provider/BUILD.bazel | 6 +- .../cluster-api/cmd/example-provider/main.go | 29 +- .../cluster-api/cmd/manager/BUILD.bazel | 2 +- .../cluster-api/cmd/manager/main.go | 4 +- .../cluster-api/config/BUILD.bazel | 2 +- .../config/ci/rbac/kustomization.yaml | 4 +- .../crd/bases/cluster.x-k8s.io_clusters.yaml | 158 + .../cluster.x-k8s.io_machinedeployments.yaml | 534 +++ .../crd/bases/cluster.x-k8s.io_machines.yaml | 377 ++ .../bases/cluster.x-k8s.io_machinesets.yaml | 496 +++ .../cluster-api/config/crd/kustomization.yaml | 30 + .../config/crd/kustomizeconfig.yaml | 17 + .../crd/patches/cainjection_in_clusters.yaml | 8 + .../cainjection_in_machinedeployments.yaml | 8 + .../crd/patches/cainjection_in_machines.yaml | 8 + .../patches/cainjection_in_machinesets.yaml | 8 + .../crd/patches/webhook_in_clusters.yaml | 17 + .../webhook_in_machinedeployments.yaml | 17 + .../crd/patches/webhook_in_machines.yaml | 17 + 
.../crd/patches/webhook_in_machinesets.yaml | 17 + ...ster.yaml => cluster.k8s.io_clusters.yaml} | 55 +- ...aml => cluster.k8s.io_machineclasses.yaml} | 11 +- .../cluster.k8s.io_machinedeployments.yaml | 595 +++ .../config/crds/cluster.k8s.io_machines.yaml | 493 +++ .../crds/cluster.k8s.io_machinesets.yaml | 557 +++ .../config/crds/cluster_v1alpha1_machine.yaml | 253 -- .../cluster_v1alpha1_machinedeployment.yaml | 265 -- .../crds/cluster_v1alpha1_machineset.yaml | 227 -- .../config/crds/kustomization.yaml | 10 +- .../config/default/kustomization.yaml | 48 +- .../config/default/manager_image_patch.yaml | 2 +- .../config/rbac/kustomization.yaml | 4 +- .../cluster-api/config/rbac/role.yaml | 47 +- ...er_role_binding.yaml => role_binding.yaml} | 0 .../samples/cluster_v1alpha1_cluster.yaml | 9 - .../samples/cluster_v1alpha1_machine.yaml | 9 - .../cluster_v1alpha1_machinedeployment.yaml | 16 - .../samples/cluster_v1alpha1_machineset.yaml | 16 - .../cluster-api/config/webhook/manifests.yaml | 0 .../cluster-api/controllers/BUILD.bazel | 36 + .../controllers/cluster_controller.go | 51 + .../controllers/machine_controller.go | 51 + .../machinedeployment_controller.go | 51 + .../controllers/machineset_controller.go | 51 + .../cluster-api/controllers/suite_test.go | 80 + .../cluster-api/docs/book/ABBREVIATIONS.md | 10 + .../cluster-api/docs/book/GLOSSARY.md | 182 +- .../cluster-api/docs/book/README.md | 2 +- .../sigs.k8s.io/cluster-api/docs/book/RELEASE | 48 +- .../cluster-api/docs/book/SUMMARY.md | 3 + .../docs/book/common_code/architecture.md | 4 +- .../book/common_code/cluster_controller.md | 8 +- .../book/common_code/machine_controller.md | 8 +- .../machinedeployment_controller.md | 10 +- .../book/common_code/machineset_controller.md | 10 +- .../docs/book/common_code/node_controller.md | 7 +- .../book/common_code/noderef_controller.md | 8 + .../create_actuators.md | 4 +- .../book/provider_implementations/overview.md | 8 +- .../cluster-api/docs/developer/releasing.md | 11 + .../v1alpha1-compared-to-v1alpha2.md | 67 + .../docs/examples/cluster/cluster.yaml | 2 +- .../docs/examples/machine/machine.yaml | 2 +- .../machinedeployment/machinedeployment.yaml | 2 +- .../docs/examples/machineset/machineset.yaml | 2 +- .../cluster-api/docs/how-to-use-clusterctl.md | 94 + .../docs/proposals/20181121-machine-api.md | 18 +- ...10-machine-states-preboot-bootstrapping.md | 502 +++ .../proposals/20190709-cluster-spec-crds.md | 233 ++ .../docs/proposals/YYYYMMDD-template.md | 31 + .../docs/proposals/images/Dockerfile | 37 + .../docs/proposals/images/Makefile | 32 + .../images/cluster-spec-crds/figure1.plantuml | 58 + .../images/cluster-spec-crds/figure1.png | Bin 0 -> 60035 bytes .../images/cluster-spec-crds/figure2.plantuml | 29 + .../images/cluster-spec-crds/figure2.png | Bin 0 -> 17984 bytes .../machine-states-preboot/Figure1.plantuml | 61 + .../images/machine-states-preboot/Figure1.png | Bin 0 -> 82569 bytes .../images/machine-states-preboot/Figure2.png | Bin 0 -> 60153 bytes .../machine-states-preboot/Figure3.plantuml | 58 + .../images/machine-states-preboot/Figure3.png | Bin 0 -> 68668 bytes .../machine-states-preboot/Figure4.plantuml | 45 + .../images/machine-states-preboot/Figure4.png | Bin 0 -> 45500 bytes .../machine-states-preboot/Figure5.plantuml | 37 + .../images/machine-states-preboot/Figure5.png | Bin 0 -> 34258 bytes .../machine-states-preboot/Figure6.plantuml | 49 + .../images/machine-states-preboot/Figure6.png | Bin 0 -> 53666 bytes .../machine-states-preboot/Figure7.plantuml | 32 + 
.../images/machine-states-preboot/Figure7.png | Bin 0 -> 20249 bytes .../machine-states-preboot/Figure8.plantuml | 44 + .../images/machine-states-preboot/Figure8.png | Bin 0 -> 32285 bytes .../images/machine-states-preboot/README.md | 27 + .../cluster-api/docs/scope-and-objectives.md | 23 + .../cluster-api/docs/staging-use-cases.md | 22 +- vendor/sigs.k8s.io/cluster-api/go.mod | 69 +- vendor/sigs.k8s.io/cluster-api/go.sum | 615 +-- vendor/sigs.k8s.io/cluster-api/go.vendor | 0 .../sigs.k8s.io/cluster-api/hack/BUILD.bazel | 8 - .../cluster-api/hack/boilerplate.go.txt | 16 + .../cluster-api/hack/build-gitbook.sh | 7 +- .../cluster-api/hack/pin-dependency.sh | 92 + vendor/sigs.k8s.io/cluster-api/hack/tools.go | 8 - .../cluster-api/hack/update-vendor.sh | 12 + .../cluster-api/hack/verify-bazel.sh | 2 +- ...erify_clientset.sh => verify-clientset.sh} | 14 +- .../cluster-api/hack/verify-doctoc.sh | 56 + vendor/sigs.k8s.io/cluster-api/main.go | 100 + vendor/sigs.k8s.io/cluster-api/netlify.toml | 12 +- .../cluster-api/pkg/apis/BUILD.bazel | 15 - .../sigs.k8s.io/cluster-api/pkg/apis/apis.go | 33 - .../pkg/apis/cluster/common/BUILD.bazel | 15 - .../pkg/apis/cluster/common/plugins.go | 51 - .../pkg/apis/cluster/v1alpha1/common_types.go | 58 - .../apis/{cluster => deprecated}/BUILD.bazel | 2 +- .../pkg/apis/{cluster => deprecated}/group.go | 2 +- .../v1alpha1/BUILD.bazel | 6 +- .../v1alpha1/cluster_types.go | 7 +- .../v1alpha1/cluster_types_test.go | 0 .../apis/deprecated/v1alpha1/common_types.go | 143 + .../v1alpha1/defaults.go | 5 +- .../{cluster => deprecated}/v1alpha1/doc.go | 1 + .../v1alpha1/machine_types.go | 12 +- .../v1alpha1/machine_types_test.go | 0 .../v1alpha1/machineclass_types.go | 3 +- .../v1alpha1/machinedeployment_types.go | 14 +- .../v1alpha1/machinedeployment_types_test.go | 0 .../v1alpha1/machineset_types.go | 11 +- .../v1alpha1/machineset_types_test.go | 0 .../v1alpha1/register.go | 5 +- .../v1alpha1/testutil/BUILD.bazel | 4 +- .../v1alpha1/testutil/testutil.go | 2 +- .../v1alpha1/v1alpha1_suite_test.go | 0 .../v1alpha1/zz_generated.deepcopy.go | 80 +- .../clientset_generated/clientset/BUILD.bazel | 17 - .../clientset/fake/BUILD.bazel | 27 - .../clientset/fake/clientset_generated.go | 82 - .../clientset/fake/register.go | 56 - .../clientset/scheme/BUILD.bazel | 19 - .../typed/cluster/v1alpha1/BUILD.bazel | 26 - .../typed/cluster/v1alpha1/cluster.go | 191 - .../typed/cluster/v1alpha1/cluster_client.go | 110 - .../typed/cluster/v1alpha1/fake/BUILD.bazel | 27 - .../cluster/v1alpha1/fake/fake_cluster.go | 140 - .../v1alpha1/fake/fake_cluster_client.go | 56 - .../cluster/v1alpha1/fake/fake_machine.go | 140 - .../v1alpha1/fake/fake_machineclass.go | 128 - .../v1alpha1/fake/fake_machinedeployment.go | 140 - .../cluster/v1alpha1/fake/fake_machineset.go | 140 - .../cluster/v1alpha1/generated_expansion.go | 29 - .../typed/cluster/v1alpha1/machineclass.go | 174 - .../cluster/v1alpha1/machinedeployment.go | 191 - .../typed/cluster/v1alpha1/machineset.go | 191 - .../externalversions/BUILD.bazel | 21 - .../externalversions/cluster/BUILD.bazel | 12 - .../externalversions/cluster/interface.go | 46 - .../cluster/v1alpha1/BUILD.bazel | 25 - .../cluster/v1alpha1/cluster.go | 89 - .../cluster/v1alpha1/interface.go | 73 - .../cluster/v1alpha1/machine.go | 89 - .../cluster/v1alpha1/machineclass.go | 89 - .../cluster/v1alpha1/machinedeployment.go | 89 - .../cluster/v1alpha1/machineset.go | 89 - .../externalversions/factory.go | 180 - .../externalversions/generic.go | 70 - 
.../internalinterfaces/BUILD.bazel | 14 - .../internalinterfaces/factory_interfaces.go | 40 - .../cluster/v1alpha1/BUILD.bazel | 21 - .../cluster/v1alpha1/cluster.go | 94 - .../cluster/v1alpha1/expansion_generated.go | 59 - .../cluster/v1alpha1/machine.go | 94 - .../cluster/v1alpha1/machineclass.go | 94 - .../cluster/v1alpha1/machinedeployment.go | 94 - .../cluster/v1alpha1/machineset.go | 94 - .../cluster-api/pkg/cmdrunner/BUILD.bazel | 14 - .../pkg/cmdrunner/cmd_runner_test.go | 83 - .../cluster-api/pkg/controller/BUILD.bazel | 8 +- .../{add_noderef.go => add_cluster.go} | 4 +- .../{add_node.go => add_machine.go} | 6 +- .../pkg/controller/cluster/BUILD.bazel | 22 +- .../pkg/controller/cluster/actuator.go | 33 - .../controller/cluster/cluster_controller.go | 318 +- .../cluster/cluster_controller_phases.go | 198 + .../cluster/cluster_reconciler_suite_test.go | 19 +- .../cluster/cluster_reconciler_test.go | 59 +- .../pkg/controller/cluster/testactuator.go | 68 - .../cluster-api/pkg/controller/controller.go | 7 +- .../pkg/controller/error/BUILD.bazel | 8 - .../pkg/controller/external/BUILD.bazel | 19 + .../pkg/controller/external/testing.go | 74 + .../pkg/controller/external/util.go | 92 + .../pkg/controller/machine/BUILD.bazel | 28 +- .../pkg/controller/machine/actuator.go | 39 - .../controller/machine/machine_controller.go | 307 +- .../machine/machine_controller_noderef.go | 129 + .../machine_controller_noderef_test.go} | 60 +- .../machine/machine_controller_phases.go | 288 ++ .../machine/machine_controller_phases_test.go | 700 ++++ .../machine/machine_controller_test.go | 241 +- .../machine/machine_reconciler_suite_test.go | 20 +- .../machine/machine_reconciler_test.go | 58 +- .../pkg/controller/machine/testactuator.go | 101 - .../controller/machinedeployment/BUILD.bazel | 15 +- .../machinedeployment_controller.go | 137 +- .../machinedeployment_controller_test.go | 82 +- ...machinedeployment_reconciler_suite_test.go | 27 +- .../machinedeployment_reconciler_test.go | 344 +- .../controller/machinedeployment/rolling.go | 20 +- .../pkg/controller/machinedeployment/sync.go | 57 +- .../machinedeployment/util/BUILD.bazel | 12 +- .../controller/machinedeployment/util/util.go | 81 +- .../machinedeployment/util/util_test.go | 206 +- .../pkg/controller/machineset/BUILD.bazel | 17 +- .../controller/machineset/delete_policy.go | 26 +- .../machineset/delete_policy_test.go | 134 +- .../pkg/controller/machineset/machine.go | 16 +- .../pkg/controller/machineset/machine_test.go | 18 +- .../machineset/machineset_controller.go | 199 +- .../machineset/machineset_controller_test.go | 79 +- .../machineset_reconciler_suite_test.go | 25 +- .../machineset/machineset_reconciler_test.go | 211 +- .../pkg/controller/machineset/status.go | 8 +- .../pkg/controller/node/BUILD.bazel | 51 - .../cluster-api/pkg/controller/node/node.go | 141 - .../pkg/controller/node/node_controller.go | 101 - .../node/node_reconciler_suite_test.go | 74 - .../controller/node/node_reconciler_test.go | 77 - .../pkg/controller/noderef/BUILD.bazel | 57 - .../controller/noderef/noderef_controller.go | 228 -- .../noderef/noderef_controller_suite_test.go | 75 - .../pkg/controller/remote/BUILD.bazel | 4 +- .../pkg/controller/remote/cluster.go | 6 +- .../pkg/controller/remote/cluster_test.go | 8 +- .../cluster-api/pkg/errors/BUILD.bazel | 8 +- .../cluster-api/pkg/errors/clusters.go | 10 +- .../{apis/cluster/common => errors}/consts.go | 10 +- .../controllers.go} | 10 +- .../cluster-api/pkg/errors/machines.go | 12 +- .../pointer.go} | 15 +- 
.../cluster-api/pkg/kubeadm/BUILD.bazel | 16 - .../cluster-api/pkg/kubeadm/kubeadm.go | 97 - .../cluster-api/pkg/kubeadm/kubeadm_test.go | 106 - .../example/actuators/cluster/BUILD.bazel | 14 - .../actuators/cluster/clusteractuator.go | 50 - .../example/actuators/machine/BUILD.bazel | 14 - .../actuators/machine/machineactuator.go | 64 - .../pkg/provider/example/container/Dockerfile | 11 +- .../cluster-api/pkg/testcmdrunner/BUILD.bazel | 8 - .../pkg/testcmdrunner/test_cmd_runner.go | 46 - .../cluster-api/pkg/util/BUILD.bazel | 19 +- .../sigs.k8s.io/cluster-api/pkg/util/util.go | 352 +- .../cluster-api/pkg/util/util_test.go | 361 +- .../cluster-api/sample/cluster.yaml | 5 - .../cluster-api/sample/machine.yaml | 5 - .../cluster-api/sample/machineclass.yaml | 31 - .../cluster-api/sample/machinedeployment.yaml | 26 - .../cluster-api/sample/machineset.yaml | 25 - .../cluster-api/scripts/ci-integration.sh | 33 +- .../scripts/ci-is-vendor-in-sync.sh | 2 + .../cluster-api/scripts/ci-test.sh | 2 +- .../cluster-api/scripts/fetch_ext_bins.sh | 3 +- .../test/integration/cluster/BUILD.bazel | 8 +- .../test/integration/cluster/cluster_test.go | 75 +- .../cluster-api/vendor/modules.txt | 411 +- .../sigs.k8s.io/controller-runtime/.gitignore | 21 + .../controller-runtime/.golangci.yml | 5 + .../controller-runtime/.travis.yml | 31 + .../controller-runtime/CONTRIBUTING.md | 28 + vendor/sigs.k8s.io/controller-runtime/FAQ.md | 81 + .../Gopkg.lock | 749 +--- .../sigs.k8s.io/controller-runtime/Gopkg.toml | 88 + vendor/sigs.k8s.io/controller-runtime/OWNERS | 11 + .../controller-runtime/OWNERS_ALIASES | 8 + .../sigs.k8s.io/controller-runtime/README.md | 61 + .../controller-runtime/SECURITY_CONTACTS | 15 + .../controller-runtime/TMP-LOGGING.md | 166 + .../controller-runtime/VERSIONING.md | 271 ++ .../sigs.k8s.io/controller-runtime/alias.go | 130 + .../controller-runtime/code-of-conduct.md | 3 + vendor/sigs.k8s.io/controller-runtime/doc.go | 124 + vendor/sigs.k8s.io/controller-runtime/go.mod | 50 + vendor/sigs.k8s.io/controller-runtime/go.sum | 122 + .../pkg/builder/controller.go | 215 + .../controller-runtime/pkg/builder/doc.go | 28 + .../controller-runtime/pkg/builder/webhook.go | 166 + .../controller-runtime/pkg/cache/cache.go | 48 +- .../controller-runtime/pkg/cache/doc.go | 19 + .../pkg/cache/informer_cache.go | 12 +- .../pkg/cache/internal/cache_reader.go | 42 +- .../pkg/cache/internal/informers_map.go | 89 +- .../pkg/cache/multi_namespace_cache.go | 220 ++ .../pkg/client/apiutil/apimachinery.go | 10 +- .../controller-runtime/pkg/client/client.go | 77 +- .../pkg/client/client_cache.go | 4 +- .../pkg/client/config/config.go | 74 +- .../pkg/client/config/doc.go | 2 +- .../controller-runtime/pkg/client/doc.go | 49 + .../pkg/client/interfaces.go | 231 +- .../controller-runtime/pkg/client/options.go | 491 +++ .../controller-runtime/pkg/client/patch.go | 95 + .../controller-runtime/pkg/client/split.go | 20 +- .../pkg/client/typed_client.go | 102 +- .../pkg/client/unstructured_client.go | 98 +- .../pkg/controller/controller.go | 30 +- .../controllerutil/controllerutil.go | 170 + .../pkg/controller/controllerutil/doc.go | 6 +- .../pkg/conversion/conversion.go | 40 + .../controller-runtime/pkg/envtest/crd.go | 268 ++ .../controller-runtime/pkg/envtest}/doc.go | 19 +- .../controller-runtime/pkg/envtest/ginkgo.go | 11 + .../pkg/envtest/printer/ginkgo.go | 53 + .../controller-runtime/pkg/envtest/server.go | 261 ++ .../controller-runtime/pkg/event/doc.go | 9 +- .../controller-runtime/pkg/event/event.go | 12 +- 
.../controller-runtime/pkg/handler/doc.go | 4 +- .../controller-runtime/pkg/handler/enqueue.go | 4 +- .../pkg/handler/enqueue_mapped.go | 11 + .../pkg/handler/enqueue_owner.go | 35 +- .../pkg/internal/controller/controller.go | 48 +- .../pkg/internal/log/log.go} | 32 +- .../pkg/leaderelection/doc.go | 6 +- .../pkg/leaderelection/leader_election.go | 3 +- .../pkg/{runtime => }/log/deleg.go | 21 +- .../controller-runtime/pkg/log/log.go | 48 + .../pkg/{runtime => }/log/null.go | 0 .../{runtime/log => log/zap}/kube_helpers.go | 6 +- .../{runtime/log/log.go => log/zap/zap.go} | 46 +- .../pkg/manager/internal.go | 176 +- .../controller-runtime/pkg/manager/manager.go | 124 +- .../pkg/{runtime => manager}/signals/doc.go | 4 +- .../{runtime => manager}/signals/signal.go | 2 +- .../signals/signal_posix.go | 0 .../signals/signal_windows.go | 0 .../pkg/metrics/client_go_adapter.go | 106 +- .../pkg/metrics/listener.go | 13 +- .../pkg/metrics/workqueue.go | 173 + .../controller-runtime/pkg/patch/patch.go | 45 - .../pkg/predicate/predicate.go | 49 +- .../pkg/reconcile/reconcile.go | 1 + .../pkg/recorder/recorder.go | 4 + .../pkg/runtime/inject/inject.go | 61 +- .../pkg/runtime/scheme/scheme.go | 43 +- .../controller-runtime/pkg/scheme/scheme.go | 94 + .../controller-runtime/pkg/source/doc.go | 2 +- .../pkg/source/internal/eventsource.go | 10 +- .../controller-runtime/pkg/source/source.go | 9 +- .../pkg/webhook/admission/decode.go | 60 +- .../pkg/webhook/admission/defaulter.go | 75 + .../pkg/webhook/admission/doc.go | 79 +- .../pkg/webhook/admission/http.go | 63 +- .../pkg/webhook/admission/inject.go} | 25 +- .../pkg/webhook/admission/multi.go | 126 + .../pkg/webhook/admission/response.go | 66 +- .../pkg/webhook/admission/types/types.go | 44 - .../pkg/webhook/admission/validator.go | 108 + .../pkg/webhook/admission/webhook.go | 307 +- .../controller-runtime/pkg/webhook/alias.go | 73 + .../pkg/webhook/conversion/conversion.go | 349 ++ .../pkg/webhook/conversion/decoder.go | 31 + .../controller-runtime/pkg/webhook/doc.go | 19 +- .../internal/certwatcher/certwatcher.go | 162 + .../controller-runtime/pkg/webhook/server.go | 196 + .../pkg/webhook/types/webhook.go | 28 - .../cmd/controller-gen/main.go | 345 +- .../controller-tools/pkg/crd/desc_visitor.go | 78 + .../controller-tools/pkg/crd/doc.go | 63 + .../controller-tools/pkg/crd/flatten.go | 441 +++ .../controller-tools/pkg/crd/gen.go | 189 + .../pkg/crd/generator/generator.go | 207 - .../controller-tools/pkg/crd/known_types.go | 90 + .../controller-tools/pkg/crd/markers/crd.go | 284 ++ .../controller-tools/pkg/crd/markers/doc.go | 46 + .../pkg/crd/markers/package.go | 37 + .../pkg/crd/markers/register.go | 83 + .../pkg/crd/markers/validation.go | 286 ++ .../crd/markers/zz_generated.markerhelp.go | 304 ++ .../controller-tools/pkg/crd/parser.go | 219 ++ .../controller-tools/pkg/crd/schema.go | 419 ++ .../pkg/crd/schema_visitor.go | 104 + .../controller-tools/pkg/crd/spec.go | 242 ++ .../controller-tools/pkg/crd/util/util.go | 117 - .../pkg/crd/zz_generated.markerhelp.go | 45 + .../controller-tools/pkg/deepcopy}/doc.go | 13 +- .../controller-tools/pkg/deepcopy/gen.go | 300 ++ .../controller-tools/pkg/deepcopy/traverse.go | 812 ++++ .../pkg/deepcopy/zz_generated.markerhelp.go | 45 + .../controller-tools/pkg/genall/doc.go | 58 + .../controller-tools/pkg/genall/genall.go | 174 + .../controller-tools/pkg/genall/help}/doc.go | 15 +- .../pkg/genall/help/pretty/doc.go | 30 + .../pkg/genall/help/pretty/help.go | 171 + .../pkg/genall/help/pretty/print.go | 304 ++ 
.../pkg/genall/help/pretty/table.go | 64 + .../controller-tools/pkg/genall/help/sort.go | 106 + .../controller-tools/pkg/genall/help/types.go | 215 + .../pkg/genall/input.go} | 30 +- .../controller-tools/pkg/genall/options.go | 192 + .../controller-tools/pkg/genall/output.go | 157 + .../pkg/genall/zz_generated.markerhelp.go | 89 + .../pkg/internal/codegen/parse/apis.go | 287 -- .../pkg/internal/codegen/parse/context.go | 42 - .../pkg/internal/codegen/parse/crd.go | 639 --- .../pkg/internal/codegen/parse/index.go | 161 - .../pkg/internal/codegen/parse/parser.go | 151 - .../pkg/internal/codegen/parse/util.go | 539 --- .../pkg/internal/codegen/types.go | 213 - .../pkg/internal/general/util.go | 102 - .../controller-tools/pkg/loader/doc.go | 60 + .../controller-tools/pkg/loader/errors.go | 63 + .../controller-tools/pkg/loader/loader.go | 361 ++ .../controller-tools/pkg/loader/paths.go | 32 + .../controller-tools/pkg/loader/refs.go | 254 ++ .../controller-tools/pkg/loader/visit.go | 81 + .../controller-tools/pkg/markers/collect.go | 422 ++ .../controller-tools/pkg/markers/doc.go | 113 + .../controller-tools/pkg/markers/help.go | 81 + .../controller-tools/pkg/markers/parse.go | 727 ++++ .../controller-tools/pkg/markers/reg.go | 153 + .../pkg/markers/regutil.go} | 33 +- .../controller-tools/pkg/markers/zip.go | 191 + .../controller-tools/pkg/rbac/manifests.go | 170 - .../controller-tools/pkg/rbac/parser.go | 265 +- .../pkg/rbac/zz_generated.markerhelp.go | 73 + .../controller-tools/pkg/util/util.go | 77 - .../controller-tools/pkg/webhook/admission.go | 89 - .../controller-tools/pkg/webhook/generator.go | 334 -- .../controller-tools/pkg/webhook/manifests.go | 151 - .../controller-tools/pkg/webhook/parser.go | 358 +- .../controller-tools/pkg/webhook/types.go | 38 - .../controller-tools/pkg/webhook/writer.go | 92 - .../pkg/webhook/zz_generated.markerhelp.go | 80 + .../integration/apiserver.go | 15 +- .../testing_frameworks/integration/etcd.go | 15 +- .../integration/internal/process.go | 11 +- 1734 files changed, 71528 insertions(+), 113443 deletions(-) create mode 100644 Dockerfile create mode 100644 api/v1alpha2/groupversion_info.go create mode 100644 api/v1alpha2/openstackcluster_types.go create mode 100644 api/v1alpha2/openstackmachine_types.go create mode 100644 api/v1alpha2/types.go rename {pkg/apis/openstackproviderconfig/v1alpha1 => api/v1alpha2}/zz_generated.deepcopy.go (73%) delete mode 100644 cmd/clusterctl/.gitignore delete mode 100644 cmd/clusterctl/Dockerfile delete mode 100644 cmd/clusterctl/examples/openstack/cluster.yaml delete mode 100644 cmd/clusterctl/examples/openstack/machine-deployment.yaml.template delete mode 100644 cmd/clusterctl/examples/openstack/machines.yaml.template delete mode 100644 cmd/clusterctl/examples/openstack/provider-component/cluster-api/kustomization.yaml delete mode 100644 cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml delete mode 100644 cmd/manager/Dockerfile delete mode 100644 cmd/manager/main.go create mode 100644 config/certmanager/certificate.yaml create mode 100644 config/certmanager/kustomization.yaml create mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml create mode 100644 config/crd/kustomization.yaml create mode 100644 config/crd/kustomizeconfig.yaml create mode 100644 
config/crd/patches/cainjection_in_openstackclusters.yaml create mode 100644 config/crd/patches/cainjection_in_openstackmachines.yaml create mode 100644 config/crd/patches/webhook_in_openstackclusters.yaml create mode 100644 config/crd/patches/webhook_in_openstackmachines.yaml delete mode 100644 config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderspec.yaml delete mode 100644 config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderstatus.yaml delete mode 100644 config/crds/openstackproviderconfig_v1alpha1_openstackmachineproviderstatus.yaml delete mode 100644 config/crds/openstackproviderconfig_v1alpha1_openstackproviderspec.yaml create mode 100644 config/default/kustomization.yaml create mode 100644 config/default/manager_auth_proxy_patch.yaml create mode 100644 config/default/manager_image_patch.yaml create mode 100644 config/default/manager_prometheus_metrics_patch.yaml create mode 100644 config/default/manager_webhook_patch.yaml create mode 100644 config/default/webhookcainjection_patch.yaml delete mode 100644 config/kustomization.yaml create mode 100644 config/manager/kustomization.yaml create mode 100644 config/manager/manager.yaml delete mode 100644 config/manager/service.yaml delete mode 100644 config/manager/statefulset.yaml create mode 100644 config/rbac/auth_proxy_role.yaml rename config/rbac/{rbac_role_binding.yaml => auth_proxy_role_binding.yaml} (55%) create mode 100644 config/rbac/auth_proxy_service.yaml create mode 100644 config/rbac/kustomization.yaml create mode 100644 config/rbac/leader_election_role.yaml rename config/rbac/{secrets_role_binding.yaml => leader_election_role_binding.yaml} (52%) rename vendor/sigs.k8s.io/cluster-api/config/rbac/manager_role.yaml => config/rbac/role.yaml (54%) create mode 100644 config/rbac/role_binding.yaml delete mode 100644 config/rbac/secrets_role.yaml create mode 100644 config/samples/infrastructure_v1alpha2_openstackcluster.yaml create mode 100644 config/samples/infrastructure_v1alpha2_openstackmachine.yaml create mode 100644 config/webhook/kustomization.yaml create mode 100644 config/webhook/kustomizeconfig.yaml create mode 100644 config/webhook/manifests.yaml create mode 100644 config/webhook/service.yaml create mode 100644 controllers/openstackcluster_controller.go create mode 100644 controllers/openstackmachine_controller.go create mode 100644 controllers/suite_test.go rename hack/{boilerplate.go.txt => boilerplate/boilerplate.generatego.txt} (100%) create mode 100644 main.go delete mode 100644 overlays-config/coreos/kustomization.yaml delete mode 100644 overlays-config/coreos/manager/deployment.yaml delete mode 100644 overlays-config/generic/kustomization.yaml delete mode 100644 overlays-config/generic/manager/deployment.yaml delete mode 100644 pkg/apis/apis.go delete mode 100644 pkg/apis/openstackproviderconfig/v1alpha1/register.go delete mode 100644 pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go delete mode 100644 pkg/apis/openstackproviderconfig/v1alpha1/types.go delete mode 100644 pkg/cloud/openstack/cluster/actuator.go delete mode 100644 pkg/cloud/openstack/contants/constants.go delete mode 100644 pkg/cloud/openstack/machine/actuator.go delete mode 100644 pkg/cloud/openstack/machine/instancestatus.go delete mode 100644 pkg/cloud/openstack/services/certificates/BUILD.bazel delete mode 100644 pkg/deployer/deployer.go rename {cmd/clusterctl/examples/openstack => samples}/.gitignore (100%) rename {cmd/clusterctl/examples/openstack => samples}/README.md (94%) rename 
{cmd/clusterctl/examples/openstack => samples}/generate-yaml.sh (85%) create mode 100644 samples/templates/clouds-secrets/.gitignore rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/clouds-secrets/kustomization.yaml (89%) create mode 100644 samples/templates/cluster.yaml create mode 100644 samples/templates/machines.yaml create mode 100644 samples/templates/namespace/kustomization.yaml rename {config/manager => samples/templates/namespace}/namespace.yaml (71%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/centos/kustomization.yaml (100%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/centos/templates/master-user-data.sh (98%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/centos/templates/worker-user-data.sh (97%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/coreos/kustomization.yaml (100%) create mode 100644 samples/templates/user-data/coreos/master-user-data.yaml rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/coreos/templates/common.yaml (96%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/coreos/templates/master.yaml (100%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/coreos/templates/worker.yaml (100%) create mode 100644 samples/templates/user-data/coreos/worker-user-data.yaml rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/ubuntu/kustomization.yaml (100%) create mode 100644 samples/templates/user-data/ubuntu/master-user-data.sh rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/ubuntu/templates/master-user-data.sh (98%) rename {cmd/clusterctl/examples/openstack/provider-component => samples/templates}/user-data/ubuntu/templates/worker-user-data.sh (98%) create mode 100644 samples/templates/user-data/ubuntu/worker-user-data.sh create mode 100644 third_party/forked/README.md rename {vendor/contrib.go.opencensus.io/exporter/ocagent => third_party/forked/rerun-process-wrapper}/LICENSE (100%) create mode 100644 third_party/forked/rerun-process-wrapper/README.md create mode 100755 third_party/forked/rerun-process-wrapper/restart.sh create mode 100755 third_party/forked/rerun-process-wrapper/start.sh delete mode 100644 vendor/cloud.google.com/go/AUTHORS delete mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS delete mode 100644 vendor/cloud.google.com/go/LICENSE delete mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/README.md delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/common.go delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/options.go delete mode 100644 vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go delete mode 100644 
vendor/contrib.go.opencensus.io/exporter/ocagent/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go delete mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go delete mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go delete mode 100644 vendor/github.com/appscode/jsonpatch/.gitignore delete mode 100644 vendor/github.com/appscode/jsonpatch/.travis.yml delete mode 100644 vendor/github.com/appscode/jsonpatch/README.md delete mode 100644 vendor/github.com/appscode/jsonpatch/go.mod delete mode 100644 vendor/github.com/appscode/jsonpatch/go.sum delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/LICENSE delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go delete mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go delete mode 100644 
vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go create mode 100644 vendor/github.com/coreos/go-semver/NOTICE delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.gitignore delete mode 100644 vendor/github.com/dgrijalva/jwt-go/.travis.yml delete mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md delete mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/doc.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/errors.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/none.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/parser.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_pss.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/signing_method.go delete mode 100644 vendor/github.com/dgrijalva/jwt-go/token.go create mode 100644 vendor/github.com/evanphx/json-patch/.travis.yml create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE create mode 100644 vendor/github.com/evanphx/json-patch/README.md create mode 100644 vendor/github.com/evanphx/json-patch/errors.go create mode 100644 vendor/github.com/evanphx/json-patch/merge.go create mode 100644 vendor/github.com/evanphx/json-patch/patch.go create mode 100644 vendor/github.com/fatih/color/.travis.yml create mode 100644 vendor/github.com/fatih/color/Gopkg.lock create mode 100644 vendor/github.com/fatih/color/Gopkg.toml create mode 100644 vendor/github.com/fatih/color/LICENSE.md create mode 100644 vendor/github.com/fatih/color/README.md create mode 100644 vendor/github.com/fatih/color/color.go create mode 100644 vendor/github.com/fatih/color/doc.go delete mode 100644 vendor/github.com/ghodss/yaml/.gitignore delete mode 100644 vendor/github.com/ghodss/yaml/.travis.yml delete mode 100644 vendor/github.com/ghodss/yaml/LICENSE delete mode 100644 vendor/github.com/ghodss/yaml/README.md delete mode 100644 vendor/github.com/ghodss/yaml/fields.go delete mode 100644 vendor/github.com/ghodss/yaml/yaml.go delete mode 100644 vendor/github.com/gobuffalo/envy/.env delete mode 100644 vendor/github.com/gobuffalo/envy/.gitignore delete mode 100644 vendor/github.com/gobuffalo/envy/.travis.yml delete mode 100644 vendor/github.com/gobuffalo/envy/Makefile delete mode 100644 vendor/github.com/gobuffalo/envy/README.md delete mode 100644 vendor/github.com/gobuffalo/envy/envy.go delete mode 100644 vendor/github.com/gobuffalo/envy/go.mod delete mode 100644 vendor/github.com/gobuffalo/envy/go.sum delete mode 100644 vendor/github.com/gobuffalo/envy/shoulders.md delete mode 100644 vendor/github.com/gobuffalo/envy/version.go rename vendor/github.com/{peterbourgon/diskv => gobuffalo/flect}/LICENSE (87%) create mode 100644 vendor/github.com/gobuffalo/flect/SHOULDERS.md create mode 100644 vendor/github.com/gobuffalo/flect/azure-pipelines.yml create mode 100644 vendor/github.com/gobuffalo/flect/azure-tests.yml create mode 100644 vendor/github.com/gobuffalo/flect/humanize.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto delete mode 100644 vendor/github.com/google/btree/.travis.yml delete mode 100644 vendor/github.com/google/btree/LICENSE delete mode 100644 vendor/github.com/google/btree/README.md delete mode 100644 vendor/github.com/google/btree/btree.go delete mode 100644 vendor/github.com/google/btree/btree_mem.go delete mode 100644 vendor/github.com/google/gofuzz/go.mod delete mode 100644 vendor/github.com/google/uuid/.travis.yml delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/google/uuid/LICENSE delete mode 100644 vendor/github.com/google/uuid/README.md delete mode 100644 vendor/github.com/google/uuid/dce.go delete mode 100644 vendor/github.com/google/uuid/doc.go delete mode 100644 vendor/github.com/google/uuid/go.mod delete mode 100644 vendor/github.com/google/uuid/hash.go delete mode 100644 vendor/github.com/google/uuid/marshal.go delete mode 100644 vendor/github.com/google/uuid/node.go delete mode 100644 vendor/github.com/google/uuid/node_js.go delete mode 100644 vendor/github.com/google/uuid/node_net.go delete mode 100644 vendor/github.com/google/uuid/sql.go delete mode 100644 vendor/github.com/google/uuid/time.go delete mode 100644 vendor/github.com/google/uuid/util.go delete mode 100644 vendor/github.com/google/uuid/uuid.go delete mode 100644 vendor/github.com/google/uuid/version1.go delete mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/gregjones/httpcache/.travis.yml delete mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt delete mode 100644 vendor/github.com/gregjones/httpcache/README.md delete mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go delete mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go create mode 100644 vendor/github.com/hpcloud/tail/.gitignore create mode 100644 vendor/github.com/hpcloud/tail/.travis.yml create mode 100644 vendor/github.com/hpcloud/tail/CHANGES.md create mode 100644 vendor/github.com/hpcloud/tail/Dockerfile create mode 100644 vendor/github.com/hpcloud/tail/LICENSE.txt create mode 100644 vendor/github.com/hpcloud/tail/Makefile create mode 100644 vendor/github.com/hpcloud/tail/README.md create mode 100644 vendor/github.com/hpcloud/tail/appveyor.yml rename vendor/github.com/{dgrijalva/jwt-go/LICENSE => hpcloud/tail/ratelimiter/Licence} (96%) create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/memory.go create mode 100644 vendor/github.com/hpcloud/tail/ratelimiter/storage.go create mode 100644 vendor/github.com/hpcloud/tail/tail.go create mode 100644 vendor/github.com/hpcloud/tail/tail_posix.go create mode 100644 vendor/github.com/hpcloud/tail/tail_windows.go create mode 100644 vendor/github.com/hpcloud/tail/util/util.go create mode 100644 vendor/github.com/hpcloud/tail/watch/filechanges.go create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify.go create mode 100644 vendor/github.com/hpcloud/tail/watch/inotify_tracker.go create mode 100644 vendor/github.com/hpcloud/tail/watch/polling.go create mode 100644 vendor/github.com/hpcloud/tail/watch/watch.go create mode 100644 
vendor/github.com/hpcloud/tail/winfile/winfile.go delete mode 100644 vendor/github.com/joho/godotenv/.gitignore delete mode 100644 vendor/github.com/joho/godotenv/.travis.yml delete mode 100644 vendor/github.com/joho/godotenv/README.md delete mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/github.com/mattn/go-colorable/.travis.yml create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/go.mod create mode 100644 vendor/github.com/mattn/go-colorable/go.sum create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml rename vendor/github.com/{gobuffalo/envy/LICENSE.txt => mattn/go-isatty/LICENSE} (93%) create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.mod create mode 100644 vendor/github.com/mattn/go-isatty/go.sum create mode 100644 vendor/github.com/mattn/go-isatty/isatty_android.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/.gitignore create mode 100644 vendor/github.com/onsi/ginkgo/.travis.yml create mode 100644 vendor/github.com/onsi/ginkgo/CHANGELOG.md create mode 100644 vendor/github.com/onsi/ginkgo/CONTRIBUTING.md rename vendor/github.com/{joho/godotenv/LICENCE => onsi/ginkgo/LICENSE} (95%) create mode 100644 vendor/github.com/onsi/ginkgo/README.md create mode 100644 vendor/github.com/onsi/ginkgo/RELEASING.md create mode 100644 vendor/github.com/onsi/ginkgo/config/config.go create mode 100644 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/failer/failer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go create mode 100644 
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/server.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/spec.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec/specs.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/suite/suite.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go create mode 100644 vendor/github.com/onsi/ginkgo/internal/writer/writer.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/default_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go rename vendor/github.com/{gobuffalo/flect/LICENSE.txt => onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE} (93%) create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go create mode 100644 
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go create mode 100644 vendor/github.com/onsi/ginkgo/types/code_location.go create mode 100644 vendor/github.com/onsi/ginkgo/types/synchronization.go create mode 100644 vendor/github.com/onsi/ginkgo/types/types.go delete mode 100644 vendor/github.com/pborman/uuid/go.mod delete mode 100644 vendor/github.com/pborman/uuid/go.sum delete mode 100644 vendor/github.com/peterbourgon/diskv/README.md delete mode 100644 vendor/github.com/peterbourgon/diskv/compression.go delete mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go delete mode 100644 vendor/github.com/peterbourgon/diskv/index.go delete mode 100644 vendor/github.com/rogpeppe/go-internal/LICENSE delete mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go delete mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/print.go delete mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/read.go delete mode 100644 vendor/github.com/rogpeppe/go-internal/modfile/rule.go delete mode 100644 vendor/github.com/rogpeppe/go-internal/module/module.go delete mode 100644 vendor/github.com/rogpeppe/go-internal/semver/semver.go delete mode 100644 vendor/github.com/spf13/afero/.travis.yml delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt delete mode 100644 vendor/github.com/spf13/afero/README.md delete mode 100644 vendor/github.com/spf13/afero/afero.go delete mode 100644 vendor/github.com/spf13/afero/appveyor.yml delete mode 100644 vendor/github.com/spf13/afero/basepath.go delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go delete mode 100644 vendor/github.com/spf13/afero/go.mod delete mode 100644 vendor/github.com/spf13/afero/go.sum delete mode 100644 vendor/github.com/spf13/afero/httpFs.go delete mode 100644 vendor/github.com/spf13/afero/ioutil.go delete mode 100644 vendor/github.com/spf13/afero/lstater.go delete mode 100644 vendor/github.com/spf13/afero/match.go delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go delete mode 100644 vendor/github.com/spf13/afero/mem/file.go delete mode 100644 vendor/github.com/spf13/afero/memmap.go delete mode 100644 vendor/github.com/spf13/afero/os.go delete mode 100644 vendor/github.com/spf13/afero/path.go delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go delete mode 100644 vendor/github.com/spf13/afero/unionFile.go delete mode 100644 vendor/github.com/spf13/afero/util.go delete mode 100644 vendor/go.opencensus.io/.gitignore delete mode 100644 vendor/go.opencensus.io/.travis.yml delete mode 100644 vendor/go.opencensus.io/AUTHORS delete mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md delete mode 100644 vendor/go.opencensus.io/Gopkg.lock delete mode 100644 vendor/go.opencensus.io/Gopkg.toml delete mode 100644 vendor/go.opencensus.io/LICENSE delete mode 100644 vendor/go.opencensus.io/README.md delete mode 100644 vendor/go.opencensus.io/appveyor.yml delete mode 100644 vendor/go.opencensus.io/exemplar/exemplar.go delete mode 100644 vendor/go.opencensus.io/go.mod 
delete mode 100644 vendor/go.opencensus.io/go.sum delete mode 100644 vendor/go.opencensus.io/internal/internal.go delete mode 100644 vendor/go.opencensus.io/internal/sanitize.go delete mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go delete mode 100644 vendor/go.opencensus.io/internal/traceinternals.go delete mode 100644 vendor/go.opencensus.io/opencensus.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/doc.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/route.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/server.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go delete mode 100644 vendor/go.opencensus.io/stats/doc.go delete mode 100644 vendor/go.opencensus.io/stats/internal/record.go delete mode 100644 vendor/go.opencensus.io/stats/internal/validation.go delete mode 100644 vendor/go.opencensus.io/stats/measure.go delete mode 100644 vendor/go.opencensus.io/stats/measure_float64.go delete mode 100644 vendor/go.opencensus.io/stats/measure_int64.go delete mode 100644 vendor/go.opencensus.io/stats/record.go delete mode 100644 vendor/go.opencensus.io/stats/units.go delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go delete mode 100644 vendor/go.opencensus.io/stats/view/collector.go delete mode 100644 vendor/go.opencensus.io/stats/view/doc.go delete mode 100644 vendor/go.opencensus.io/stats/view/export.go delete mode 100644 vendor/go.opencensus.io/stats/view/view.go delete mode 100644 vendor/go.opencensus.io/stats/view/worker.go delete mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go delete mode 100644 vendor/go.opencensus.io/tag/context.go delete mode 100644 vendor/go.opencensus.io/tag/doc.go delete mode 100644 vendor/go.opencensus.io/tag/key.go delete mode 100644 vendor/go.opencensus.io/tag/map.go delete mode 100644 vendor/go.opencensus.io/tag/map_codec.go delete mode 100644 vendor/go.opencensus.io/tag/profile_19.go delete mode 100644 vendor/go.opencensus.io/tag/profile_not19.go delete mode 100644 vendor/go.opencensus.io/tag/validate.go delete mode 100644 vendor/go.opencensus.io/trace/basetypes.go delete mode 100644 vendor/go.opencensus.io/trace/config.go delete mode 100644 vendor/go.opencensus.io/trace/doc.go delete mode 100644 vendor/go.opencensus.io/trace/exemplar.go delete mode 100644 vendor/go.opencensus.io/trace/export.go delete mode 100644 vendor/go.opencensus.io/trace/internal/internal.go delete mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go delete mode 100644 vendor/go.opencensus.io/trace/sampling.go delete mode 100644 vendor/go.opencensus.io/trace/spanbucket.go delete mode 100644 vendor/go.opencensus.io/trace/spanstore.go delete mode 100644 vendor/go.opencensus.io/trace/status_codes.go delete mode 100644 vendor/go.opencensus.io/trace/trace.go delete mode 100644 vendor/go.opencensus.io/trace/trace_go11.go delete mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go delete mode 100644 
vendor/go.opencensus.io/trace/tracestate/tracestate.go delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 vendor/golang.org/x/net/trace/events.go delete mode 100644 vendor/golang.org/x/net/trace/histogram.go delete mode 100644 vendor/golang.org/x/net/trace/trace.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go delete mode 100644 vendor/golang.org/x/oauth2/google/default.go delete mode 100644 vendor/golang.org/x/oauth2/google/doc.go delete mode 100644 vendor/golang.org/x/oauth2/google/google.go delete mode 100644 vendor/golang.org/x/oauth2/google/jwt.go delete mode 100644 vendor/golang.org/x/oauth2/google/sdk.go delete mode 100644 vendor/golang.org/x/oauth2/jws/jws.go delete mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go delete mode 100644 vendor/golang.org/x/sync/AUTHORS delete mode 100644 vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/sync/PATENTS delete mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go delete mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.go delete mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash delete mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go delete mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go delete mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go delete mode 100644 vendor/golang.org/x/text/encoding/japanese/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/korean/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go delete mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go delete mode 100644 vendor/golang.org/x/text/internal/language/gen.go delete mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go delete mode 100644 vendor/golang.org/x/text/language/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete 
mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 vendor/golang.org/x/time/rate/rate_go16.go create mode 100644 vendor/golang.org/x/time/rate/rate_go17.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go delete mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo.go delete mode 100644 vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist_fallback.go delete mode 100644 vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go delete mode 100644 vendor/golang.org/x/tools/imports/mkindex.go delete mode 100644 vendor/golang.org/x/tools/imports/mkstdlib.go rename vendor/{github.com/appscode/jsonpatch => gomodules.xyz/jsonpatch/v2}/LICENSE (100%) create mode 100644 vendor/gomodules.xyz/jsonpatch/v2/go.mod create mode 100644 vendor/gomodules.xyz/jsonpatch/v2/go.sum rename vendor/{github.com/appscode/jsonpatch => gomodules.xyz/jsonpatch/v2}/jsonpatch.go (97%) delete mode 100644 vendor/google.golang.org/api/AUTHORS delete mode 100644 vendor/google.golang.org/api/CONTRIBUTORS delete mode 100644 vendor/google.golang.org/api/LICENSE delete mode 100644 vendor/google.golang.org/api/support/bundler/bundler.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/appengine/README.md delete mode 100644 vendor/google.golang.org/appengine/appengine.go delete mode 100644 vendor/google.golang.org/appengine/appengine_vm.go delete mode 100644 vendor/google.golang.org/appengine/errors.go delete mode 100644 vendor/google.golang.org/appengine/go.mod delete mode 100644 vendor/google.golang.org/appengine/go.sum delete mode 100644 vendor/google.golang.org/appengine/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto delete mode 100644 vendor/google.golang.org/appengine/namespace.go delete mode 100644 vendor/google.golang.org/appengine/timeout.go delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh delete mode 100644 vendor/google.golang.org/genproto/LICENSE delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml delete mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/grpc/LICENSE delete mode 100644 vendor/google.golang.org/grpc/Makefile delete mode 100644 vendor/google.golang.org/grpc/README.md delete mode 100644 vendor/google.golang.org/grpc/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go delete mode 100644 
vendor/google.golang.org/grpc/balancer/base/balancer.go delete mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go delete mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go delete mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go delete mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go delete mode 100644 vendor/google.golang.org/grpc/call.go delete mode 100644 vendor/google.golang.org/grpc/clientconn.go delete mode 100644 vendor/google.golang.org/grpc/codec.go delete mode 100644 vendor/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/google.golang.org/grpc/codes/codes.go delete mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go delete mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go delete mode 100644 vendor/google.golang.org/grpc/dialoptions.go delete mode 100644 vendor/google.golang.org/grpc/doc.go delete mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go delete mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go delete mode 100644 vendor/google.golang.org/grpc/go.mod delete mode 100644 vendor/google.golang.org/grpc/go.sum delete mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh delete mode 100644 vendor/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go delete mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go delete mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go delete mode 100644 vendor/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go delete mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go delete mode 100644 
vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go delete mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go delete mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go delete mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/pickfirst.go delete mode 100644 vendor/google.golang.org/grpc/proxy.go delete mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go delete mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go delete mode 100644 vendor/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/google.golang.org/grpc/server.go delete mode 100644 vendor/google.golang.org/grpc/service_config.go delete mode 100644 vendor/google.golang.org/grpc/stats/handlers.go delete mode 100644 vendor/google.golang.org/grpc/stats/stats.go delete mode 100644 vendor/google.golang.org/grpc/status/status.go delete mode 100644 vendor/google.golang.org/grpc/stream.go delete mode 100644 vendor/google.golang.org/grpc/tap/tap.go delete mode 100644 vendor/google.golang.org/grpc/trace.go delete mode 100644 vendor/google.golang.org/grpc/version.go delete mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/gopkg.in/fsnotify.v1/.editorconfig create mode 100644 vendor/gopkg.in/fsnotify.v1/.gitignore create mode 100644 vendor/gopkg.in/fsnotify.v1/.travis.yml create mode 100644 vendor/gopkg.in/fsnotify.v1/AUTHORS create mode 100644 vendor/gopkg.in/fsnotify.v1/CHANGELOG.md create mode 100644 vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md rename vendor/{golang.org/x/sync => gopkg.in/fsnotify.v1}/LICENSE (92%) create mode 100644 vendor/gopkg.in/fsnotify.v1/README.md create mode 100644 vendor/gopkg.in/fsnotify.v1/fen.go create mode 100644 vendor/gopkg.in/fsnotify.v1/fsnotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify_poller.go create mode 100644 vendor/gopkg.in/fsnotify.v1/kqueue.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go create mode 100644 vendor/gopkg.in/fsnotify.v1/windows.go create mode 100644 vendor/gopkg.in/tomb.v1/LICENSE create mode 100644 vendor/gopkg.in/tomb.v1/README.md create mode 100644 vendor/gopkg.in/tomb.v1/tomb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto delete mode 100644 
vendor/k8s.io/api/admissionregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1/doc.go rename vendor/k8s.io/api/{admissionregistration/v1alpha1 => coordination/v1}/generated.pb.go (51%) create mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto rename vendor/k8s.io/api/{admissionregistration/v1alpha1 => coordination/v1}/register.go (86%) create mode 100644 vendor/k8s.io/api/coordination/v1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go rename vendor/k8s.io/api/{admissionregistration => node}/v1alpha1/zz_generated.deepcopy.go (51%) create mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1beta1/register.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated => k8s.io/apiextensions-apiserver/pkg/client/clientset}/clientset/clientset.go (71%) rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated => k8s.io/apiextensions-apiserver/pkg/client/clientset}/clientset/doc.go (100%) rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated => k8s.io/apiextensions-apiserver/pkg/client/clientset}/clientset/scheme/doc.go (100%) rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated => k8s.io/apiextensions-apiserver/pkg/client/clientset}/clientset/scheme/register.go (93%) create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go create mode 100644 
vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1 => k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1}/doc.go (97%) rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go => k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go} (89%) create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/config/types.go delete mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go delete mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake => k8s.io/client-go/kubernetes/typed/coordination/v1}/doc.go (88%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go rename vendor/{sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go => k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go} (50%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go rename vendor/k8s.io/client-go/kubernetes/typed/{admissionregistration => node}/v1alpha1/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{admissionregistration => node}/v1alpha1/generated_expansion.go (92%) rename vendor/k8s.io/client-go/kubernetes/typed/{admissionregistration/v1alpha1/admissionregistration_client.go => node/v1alpha1/node_client.go} (58%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go 
delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go delete mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go delete mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/metrics.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go create mode 100644 vendor/k8s.io/client-go/tools/record/util/util.go rename vendor/k8s.io/client-go/{rest => transport}/token_source.go (87%) delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/node.go delete mode 100644 vendor/k8s.io/client-go/util/jsonpath/parser.go create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go rename vendor/k8s.io/client-go/util/workqueue/{rate_limitting_queue.go => rate_limiting_queue.go} (100%) create mode 100644 vendor/k8s.io/code-generator/pkg/namer/tag-override.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/configuration_map.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/flags.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/map_string_bool.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/map_string_string.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/namedcertkey_flag.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/omitempty.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/sectioned.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/string_flag.go delete mode 100644 vendor/k8s.io/component-base/cli/flag/tristate.go create mode 100644 vendor/k8s.io/component-base/config/OWNERS rename vendor/k8s.io/{apimachinery/pkg/apis => component-base}/config/doc.go (90%) create mode 100644 vendor/k8s.io/component-base/config/types.go rename vendor/k8s.io/{apimachinery/pkg/apis => component-base}/config/zz_generated.deepcopy.go (51%) create mode 100644 vendor/k8s.io/klog/klogr/README.md create mode 100644 vendor/k8s.io/klog/klogr/klogr.go create mode 100644 vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_unix.go create mode 100644 vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_windows.go rename vendor/k8s.io/{client-go/util => utils}/buffer/ring_growing.go (100%) rename vendor/k8s.io/{client-go/util => utils}/integer/integer.go (81%) create mode 100644 vendor/k8s.io/utils/pointer/OWNERS create mode 100644 vendor/k8s.io/utils/pointer/pointer.go create mode 100644 vendor/k8s.io/utils/trace/trace.go create mode 100644 vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 
vendor/sigs.k8s.io/cluster-api/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/sigs.k8s.io/cluster-api/.gitignore create mode 100644 vendor/sigs.k8s.io/cluster-api/.golangci.yml delete mode 100644 vendor/sigs.k8s.io/cluster-api/Gopkg.toml create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/BUILD.bazel create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_phase_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/common_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/defaults.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/groupversion_info.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_phase_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machinedeployment_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machineset_types.go create mode 100644 vendor/sigs.k8s.io/cluster-api/api/v1alpha2/zz_generated.deepcopy.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster_test.go create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_clusters.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machines.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinesets.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/kustomization.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/kustomizeconfig.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_clusters.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinedeployments.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machines.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinesets.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_clusters.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinedeployments.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machines.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinesets.yaml rename vendor/sigs.k8s.io/cluster-api/config/crds/{cluster_v1alpha1_cluster.yaml => cluster.k8s.io_clusters.yaml} (63%) rename vendor/sigs.k8s.io/cluster-api/config/crds/{cluster_v1alpha1_machineclass.yaml => cluster.k8s.io_machineclasses.yaml} (83%) create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinedeployments.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machines.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinesets.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machine.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml rename config/rbac/rbac_role.yaml => vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml (71%) rename 
vendor/sigs.k8s.io/cluster-api/config/rbac/{manager_role_binding.yaml => role_binding.yaml} (100%) delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_cluster.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machine.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machinedeployment.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machineset.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/config/webhook/manifests.yaml create mode 100644 vendor/sigs.k8s.io/cluster-api/controllers/BUILD.bazel create mode 100644 vendor/sigs.k8s.io/cluster-api/controllers/cluster_controller.go create mode 100644 vendor/sigs.k8s.io/cluster-api/controllers/machine_controller.go create mode 100644 vendor/sigs.k8s.io/cluster-api/controllers/machinedeployment_controller.go create mode 100644 vendor/sigs.k8s.io/cluster-api/controllers/machineset_controller.go create mode 100644 vendor/sigs.k8s.io/cluster-api/controllers/suite_test.go create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/book/ABBREVIATIONS.md create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/book/common_code/noderef_controller.md create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/developer/v1alpha1-compared-to-v1alpha2.md create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/20190610-machine-states-preboot-bootstrapping.md create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/20190709-cluster-spec-crds.md create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Dockerfile create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Makefile create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure2.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure7.plantuml create mode 100644 
vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure7.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.plantuml create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.png create mode 100644 vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/README.md create mode 100644 vendor/sigs.k8s.io/cluster-api/go.vendor create mode 100644 vendor/sigs.k8s.io/cluster-api/hack/boilerplate.go.txt create mode 100644 vendor/sigs.k8s.io/cluster-api/hack/pin-dependency.sh rename vendor/sigs.k8s.io/cluster-api/hack/{verify_clientset.sh => verify-clientset.sh} (81%) create mode 100644 vendor/sigs.k8s.io/cluster-api/hack/verify-doctoc.sh create mode 100644 vendor/sigs.k8s.io/cluster-api/main.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/apis.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/plugins.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/BUILD.bazel (72%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/group.go (97%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/BUILD.bazel (89%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/cluster_types.go (96%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/cluster_types_test.go (100%) create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/common_types.go rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/defaults.go (90%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/doc.go (96%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machine_types.go (93%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machine_types_test.go (100%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machineclass_types.go (97%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machinedeployment_types.go (94%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machinedeployment_types_test.go (100%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machineset_types.go (96%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/machineset_types_test.go (100%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/register.go (91%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/testutil/BUILD.bazel (53%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/testutil/testutil.go (94%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/v1alpha1_suite_test.go (100%) rename vendor/sigs.k8s.io/cluster-api/pkg/apis/{cluster => deprecated}/v1alpha1/zz_generated.deepcopy.go (94%) delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel delete mode 100644 
vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go delete mode 100644 
vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/factory.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/generic.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner_test.go rename vendor/sigs.k8s.io/cluster-api/pkg/controller/{add_noderef.go => add_cluster.go} (86%) rename vendor/sigs.k8s.io/cluster-api/pkg/controller/{add_node.go => add_machine.go} (81%) delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/actuator.go create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller_phases.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/testactuator.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/error/BUILD.bazel create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/external/BUILD.bazel create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/external/testing.go create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/external/util.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/actuator.go create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef.go rename vendor/sigs.k8s.io/cluster-api/pkg/controller/{noderef/noderef_controller_test.go => machine/machine_controller_noderef_test.go} (64%) create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases.go create mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases_test.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/testactuator.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/node/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_controller.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_suite_test.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_test.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go delete mode 100644 
vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_suite_test.go rename vendor/sigs.k8s.io/cluster-api/pkg/{apis/cluster/common => errors}/consts.go (93%) rename vendor/sigs.k8s.io/cluster-api/pkg/{controller/error/requeue_error.go => errors/controllers.go} (86%) rename vendor/sigs.k8s.io/cluster-api/pkg/{apis/addtoscheme_cluster_v1alpha1.go => errors/pointer.go} (64%) delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm_test.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/clusteractuator.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/machineactuator.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/test_cmd_runner.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/sample/cluster.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/sample/machine.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/sample/machineclass.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/sample/machinedeployment.yaml delete mode 100644 vendor/sigs.k8s.io/cluster-api/sample/machineset.yaml create mode 100644 vendor/sigs.k8s.io/controller-runtime/.gitignore create mode 100644 vendor/sigs.k8s.io/controller-runtime/.golangci.yml create mode 100644 vendor/sigs.k8s.io/controller-runtime/.travis.yml create mode 100644 vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/FAQ.md rename vendor/sigs.k8s.io/{cluster-api => controller-runtime}/Gopkg.lock (52%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/Gopkg.toml create mode 100644 vendor/sigs.k8s.io/controller-runtime/OWNERS create mode 100644 vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES create mode 100644 vendor/sigs.k8s.io/controller-runtime/README.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/VERSIONING.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/alias.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md create mode 100644 vendor/sigs.k8s.io/controller-runtime/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/go.mod create mode 100644 vendor/sigs.k8s.io/controller-runtime/go.sum create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go create mode 100644 
vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go rename pkg/apis/openstackproviderconfig/group.go => vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go (81%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go rename vendor/{k8s.io/api/admissionregistration/v1alpha1 => sigs.k8s.io/controller-runtime/pkg/envtest}/doc.go (52%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/ginkgo.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/printer/ginkgo.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go rename vendor/{k8s.io/component-base/cli/flag/noop.go => sigs.k8s.io/controller-runtime/pkg/internal/log/log.go} (60%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime => }/log/deleg.go (90%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime => }/log/null.go (100%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime/log => log/zap}/kube_helpers.go (96%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime/log/log.go => log/zap/zap.go} (64%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime => manager}/signals/doc.go (78%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime => manager}/signals/signal.go (92%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime => manager}/signals/signal_posix.go (100%) rename vendor/sigs.k8s.io/controller-runtime/pkg/{runtime => manager}/signals/signal_windows.go (100%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/patch/patch.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go rename vendor/{k8s.io/code-generator/cmd/client-gen/generators/tags.go => sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go} (50%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/types/types.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go rename cmd/clusterctl/main.go => vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go (65%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/types/webhook.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go create mode 100644 
vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go rename vendor/{k8s.io/client-go/util/jsonpath => sigs.k8s.io/controller-tools/pkg/deepcopy}/doc.go (58%) create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go rename {pkg/apis/openstackproviderconfig/v1alpha1 => vendor/sigs.k8s.io/controller-tools/pkg/genall/help}/doc.go (56%) create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go rename vendor/sigs.k8s.io/{cluster-api/pkg/cmdrunner/cmd_runner.go => controller-tools/pkg/genall/input.go} (50%) create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go create mode 100644 
vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go rename vendor/sigs.k8s.io/{controller-runtime/pkg/patch/doc.go => controller-tools/pkg/markers/regutil.go} (54%) create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/util/util.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go delete mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go diff --git a/.gitignore b/.gitignore index ebb5c0cb68..f08c41c8e5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,32 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ + + *.json *.sublime-project *.sublime-workspace -*.swp -.idea .DS_Store # OSX leaves these everywhere on SMB shares diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..c82d66cdb3 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,71 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the manager binary +#FROM golang:1.12.7 as builder +# +## Copy in the go src +#WORKDIR ${GOPATH}/src/sigs.k8s.io/cluster-api-provider-openstack +#COPY pkg/ pkg/ +#COPY cmd/ cmd/ +#COPY vendor/ vendor/ +#COPY api/ api/ +#COPY controllers/ controllers/ +#COPY main.go main.go +#COPY go.mod go.mod +#COPY go.sum go.sum +# +## Build +#RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on GOFLAGS="-mod=vendor" \ +# go build -a -ldflags '-extldflags "-static"' \ +# -o manager sigs.k8s.io/cluster-api-provider-openstack +# +## Copy the controller-manager into a thin image +#FROM gcr.io/distroless/static:latest +#WORKDIR / +#COPY --from=builder /go/src/sigs.k8s.io/cluster-api-provider-openstack/manager . 
+#USER nobody +#ENTRYPOINT ["/manager"] + +# Build the manager binary +FROM golang:1.12.7 + +# default the go proxy +ARG goproxy=https://proxy.golang.org + +# run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy +ENV GOPROXY=$goproxy + +WORKDIR /workspace +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY main.go main.go +COPY api/ api/ +COPY controllers/ controllers/ +COPY pkg/ pkg/ + +# Allow containerd to restart pods by calling /restart.sh (mostly for tilt + fast dev cycles) +# TODO: Remove this on prod and use a multi-stage build +COPY third_party/forked/rerun-process-wrapper/start.sh . +COPY third_party/forked/rerun-process-wrapper/restart.sh . + +# Build and run +RUN go install -v . +RUN mv /go/bin/cluster-api-provider-openstack /manager +ENTRYPOINT ["./start.sh", "/manager"] diff --git a/Makefile b/Makefile index 3a2a0759a9..2e9abcd877 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,13 @@ + +# Allow overriding manifest generation destination directory +MANIFEST_ROOT ?= "config" +CRD_ROOT ?= "$(MANIFEST_ROOT)/crd/bases" +WEBHOOK_ROOT ?= "$(MANIFEST_ROOT)/webhook" +RBAC_ROOT ?= "$(MANIFEST_ROOT)/rbac" + + + GIT_HOST = sigs.k8s.io PWD := $(shell pwd) BASE_DIR := $(shell basename $(PWD)) @@ -12,7 +21,7 @@ GOX_PARALLEL ?= 3 TARGETS ?= darwin/amd64 linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64le DIST_DIRS = find * -type d -exec -GENERATE_YAML_PATH=cmd/clusterctl/examples/openstack +GENERATE_YAML_PATH=samples GENERATE_YAML_EXEC=generate-yaml.sh GENERATE_YAML_TEST_FOLDER=dummy-make-auto-test @@ -24,6 +33,13 @@ TAGS := LDFLAGS := "-w -s -X 'main.version=${VERSION}'" REGISTRY ?= k8scloudprovider +MANAGER_IMAGE_NAME ?= cluster-api-provider-openstack +MANAGER_IMAGE_TAG ?= dev +PULL_POLICY ?= Always + +# Used in docker-* targets. +MANAGER_IMAGE ?= $(REGISTRY)/$(MANAGER_IMAGE_NAME):$(MANAGER_IMAGE_TAG) + .PHONY: vendor vendor: ## Runs go mod to ensure proper vendoring. ./hack/update-vendor.sh @@ -115,20 +131,12 @@ realclean: clean shell: $(SHELL) -i -# Generate code -generate: manifests - go generate ./pkg/... ./cmd/... - -manifests: - go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go crd - -images: openstack-cluster-api-controller clusterctl-image +images: docker-build -openstack-cluster-api-controller: generate manifests - docker build . -f cmd/manager/Dockerfile --network=host -t "$(REGISTRY)/openstack-cluster-api-controller:$(VERSION)" - -clusterctl-image: generate manifests - docker build . -f cmd/clusterctl/Dockerfile --network=host -t "$(REGISTRY)/openstack-cluster-api-clusterctl:$(VERSION)" +# Build the docker image +.PHONY: docker-build +docker-build: + docker build . -t ${MANAGER_IMAGE} upload-images: images @echo "push images to $(REGISTRY)" @@ -157,5 +165,32 @@ dist: build-cross $(DIST_DIRS) zip -r cluster-api-provider-openstack-$(VERSION)-{}.zip {} \; \ ) +# Generate code +.PHONY: generate +generate: + $(MAKE) generate-manifests +#TODO(sbueringer) will work after we migrated to kubeadm (because there are problems generating structs with kubeadm structs embedded) +# $(MAKE) generate-kubebuilder-code + +# Generate manifests e.g. CRD, RBAC etc. +.PHONY: generate-manifests +#generate-manifests: $(CONTROLLER_GEN) +# go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go \ # paths=./api/... 
\ +# crd:trivialVersions=true \ +# output:crd:dir=$(CRD_ROOT) \ +# output:webhook:dir=$(WEBHOOK_ROOT) \ +# webhook +# go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go \ +# paths=./controllers/... \ +# output:rbac:dir=$(RBAC_ROOT) \ +# rbac:roleName=manager-role + +.PHONY: generate-kubebuilder-code +generate-kubebuilder-code: ## Runs controller-gen + go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go \ + paths=./api/... \ + object:headerFile=./hack/boilerplate/boilerplate.generatego.txt + .PHONY: build clean cover vendor docs fmt functional lint realclean \ relnotes test translation version build-cross dist manifests diff --git a/PROJECT b/PROJECT index 61a5cd296b..7cfe30b9ba 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,10 @@ -version: "1" -domain: k8s.io +version: "2" +domain: cluster.x-k8s.io repo: sigs.k8s.io/cluster-api-provider-openstack +resources: +- group: infrastructure + version: v1alpha2 + kind: OpenStackCluster +- group: infrastructure + version: v1alpha2 + kind: OpenStackMachine diff --git a/README.md b/README.md index 9dc99fd416..e4bc68daca 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ policy may be made to more closely align with other providers in the Cluster API ## Getting Started ### Notice -Currenlty `cluster-api-provider-openstack` project is evolving into `cluster-api v1alpha2`, please use `release-0.1` branch for `cluster-api v1alpha1` development as it provides function workable code and configurations. +Currently `cluster-api-provider-openstack` project is evolving into `cluster-api v1alpha2`, please use `release-0.1` branch for `cluster-api v1alpha1` development as it provides function workable code and configurations. For more information, please refer to [v1alpha2](https://github.com/kubernetes-sigs/cluster-api-provider-openstack/issues/380) diff --git a/api/v1alpha2/groupversion_info.go b/api/v1alpha2/groupversion_info.go new file mode 100644 index 0000000000..524a6cefb3 --- /dev/null +++ b/api/v1alpha2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha2 contains API Schema definitions for the infrastructure v1alpha2 API group +// +kubebuilder:object:generate=true +// +groupName=infrastructure.cluster.x-k8s.io +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1alpha2/openstackcluster_types.go b/api/v1alpha2/openstackcluster_types.go new file mode 100644 index 0000000000..843b0c4c78 --- /dev/null +++ b/api/v1alpha2/openstackcluster_types.go @@ -0,0 +1,154 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// OpenStackClusterSpec defines the desired state of OpenStackCluster +type OpenStackClusterSpec struct { + + // The name of the secret containing the openstack credentials + // +optional + CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` + + // The name of the cloud to use from the clouds secret + // +optional + CloudName string `json:"cloudName"` + + // NodeCIDR is the OpenStack Subnet to be created. Cluster actuator will create a + // network, a subnet with NodeCIDR, and a router connected to this subnet. + // If you leave this empty, no network will be created. + NodeCIDR string `json:"nodeCidr,omitempty"` + // DNSNameservers is the list of nameservers for OpenStack Subnet being created. + DNSNameservers []string `json:"dnsNameservers,omitempty"` + // ExternalRouterIPs is an array of externalIPs on the respective subnets. + // This is necessary if the router needs a fixed ip in a specific subnet. + ExternalRouterIPs []ExternalRouterIPParam `json:"externalRouterIPs,omitempty"` + // ExternalNetworkID is the ID of an external OpenStack Network. This is necessary + // to get public internet to the VMs. + ExternalNetworkID string `json:"externalNetworkId,omitempty"` + + // UseOctavia is weather LoadBalancer Service is Octavia or not + // +optional + UseOctavia bool `json:"useOctavia, omitempty"` + + // ManagedAPIServerLoadBalancer defines whether a LoadBalancer for the + // APIServer should be created. If set to true the following properties are + // mandatory: APIServerLoadBalancerFloatingIP, APIServerLoadBalancerPort + // +optional + ManagedAPIServerLoadBalancer bool `json:"managedAPIServerLoadBalancer"` + + // APIServerLoadBalancerFloatingIP is the floatingIP which will be associated + // to the APIServer loadbalancer. The floatingIP will be created if it not + // already exists. 
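The GroupVersion, SchemeBuilder and AddToScheme variables registered above are what a controller-runtime manager uses to decode the new v1alpha2 objects. A minimal sketch of that wiring, assuming a kubebuilder-v2 style entrypoint; the package layout and error handling here are illustrative and not part of this patch:

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

func main() {
	// Build a scheme that knows both the built-in Kubernetes types and the
	// infrastructure.cluster.x-k8s.io/v1alpha2 types added in this change.
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = infrav1.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// Reconcilers for OpenStackCluster and OpenStackMachine would be registered
	// here before calling mgr.Start(ctrl.SetupSignalHandler()).
	_ = mgr
}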
+ APIServerLoadBalancerFloatingIP string `json:"apiServerLoadBalancerFloatingIP,omitempty"` + + // APIServerLoadBalancerPort is the port on which the listener on the APIServer + // loadbalancer will be created + APIServerLoadBalancerPort int `json:"apiServerLoadBalancerPort,omitempty"` + + // APIServerLoadBalancerAdditionalPorts adds additional ports to the APIServerLoadBalancer + APIServerLoadBalancerAdditionalPorts []int `json:"apiServerLoadBalancerAdditionalPorts,omitempty"` + + // ManagedSecurityGroups defines that kubernetes manages the OpenStack security groups + // for now, that means that we'll create two security groups, one allowing SSH + // and API access from everywhere, and another one that allows all traffic to/from + // machines belonging to that group. In the future, we could make this more flexible. + // +optional + ManagedSecurityGroups bool `json:"managedSecurityGroups"` + + // DisablePortSecurity disables the port security of the network created for the + // Kubernetes cluster, which also disables SecurityGroups + DisablePortSecurity bool `json:"disablePortSecurity,omitempty"` + + // Tags for all resources in cluster + Tags []string `json:"tags,omitempty"` + + // Default: True. In case of server tag errors, set to False + DisableServerTags bool `json:"disableServerTags,omitempty"` + + // CAKeyPair is the key pair for ca certs. + CAKeyPair KeyPair `json:"caKeyPair,omitempty"` + + //EtcdCAKeyPair is the key pair for etcd. + EtcdCAKeyPair KeyPair `json:"etcdCAKeyPair,omitempty"` + + // FrontProxyCAKeyPair is the key pair for FrontProxyKeyPair. + FrontProxyCAKeyPair KeyPair `json:"frontProxyCAKeyPair,omitempty"` + + // SAKeyPair is the service account key pair. + SAKeyPair KeyPair `json:"saKeyPair,omitempty"` + + // ClusterConfiguration holds the cluster-wide information used during a + // kubeadm init call. + ClusterConfiguration kubeadmv1beta1.ClusterConfiguration `json:"clusterConfiguration,omitempty"` +} + +// OpenStackClusterStatus defines the observed state of OpenStackCluster +type OpenStackClusterStatus struct { + Ready bool `json:"ready"` + // APIEndpoints represents the endpoints to communicate with the control plane. + // +optional + APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"` + + // Network contains all information about the created OpenStack Network. + // It includes Subnets and Router. + Network *Network `json:"network,omitempty"` + + // ControlPlaneSecurityGroups contains all the information about the OpenStack + // Security Group that needs to be applied to control plane nodes. + // TODO: Maybe instead of two properties, we add a property to the group? + ControlPlaneSecurityGroup *SecurityGroup `json:"controlPlaneSecurityGroup,omitempty"` + + // GlobalSecurityGroup contains all the information about the OpenStack Security + // Group that needs to be applied to all nodes, both control plane and worker nodes. 
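Because the spec is a plain Go struct, an OpenStackCluster can be built directly in code. The sketch below touches the most commonly set fields; the names, CIDR and network ID are placeholders rather than defaults defined by this patch:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

// newExampleOpenStackCluster returns a minimal OpenStackCluster; every value is
// a placeholder chosen for illustration.
func newExampleOpenStackCluster() *infrav1.OpenStackCluster {
	return &infrav1.OpenStackCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: infrav1.OpenStackClusterSpec{
			CloudsSecret:      &corev1.SecretReference{Name: "cloud-config", Namespace: "default"},
			CloudName:         "openstack",
			NodeCIDR:          "10.6.0.0/24",
			DNSNameservers:    []string{"192.0.2.53"},
			ExternalNetworkID: "<external-network-uuid>",
			// If ManagedAPIServerLoadBalancer were true, the floating IP and port
			// fields documented above would become mandatory.
			ManagedAPIServerLoadBalancer: false,
			ManagedSecurityGroups:        true,
		},
	}
}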
+ GlobalSecurityGroup *SecurityGroup `json:"globalSecurityGroup,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=openstackclusters,scope=Namespaced +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// OpenStackCluster is the Schema for the openstackclusters API +type OpenStackCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OpenStackClusterSpec `json:"spec,omitempty"` + Status OpenStackClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OpenStackClusterList contains a list of OpenStackCluster +type OpenStackClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OpenStackCluster{}, &OpenStackClusterList{}) +} diff --git a/api/v1alpha2/openstackmachine_types.go b/api/v1alpha2/openstackmachine_types.go new file mode 100644 index 0000000000..793b5c4924 --- /dev/null +++ b/api/v1alpha2/openstackmachine_types.go @@ -0,0 +1,155 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/cluster-api/pkg/errors" +) + +const ( + // MachineFinalizer allows ReconcileOpenStackMachine to clean up OpenStack resources associated with OpenStackMachine before + // removing it from the apiserver. + MachineFinalizer = "openstackmachine.infrastructure.cluster.x-k8s.io" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// OpenStackMachineSpec defines the desired state of OpenStackMachine +type OpenStackMachineSpec struct { + + // ProviderID is the unique identifier as specified by the cloud provider. + ProviderID *string `json:"providerID,omitempty"` + + // The name of the secret containing the openstack credentials + // +optional + CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` + + // The name of the cloud to use from the clouds secret + // +optional + CloudName string `json:"cloudName"` + + // The flavor reference for the flavor for your server instance. + Flavor string `json:"flavor"` + + // The name of the image to use for your server instance. + // If the RootVolume is specified, this will be ignored and use rootVolume directly. + Image string `json:"image"` + + // The ssh key to inject in the instance + KeyName string `json:"keyName,omitempty"` + + // A networks object. Required parameter when there are multiple networks defined for the tenant. + // When you do not specify the networks parameter, the server attaches to the only network created for the current tenant. + Networks []NetworkParam `json:"networks,omitempty"` + // The floatingIP which will be associated to the machine, only used for master. + // The floatingIP should have been created and haven't been associated. 
+ FloatingIP string `json:"floatingIP,omitempty"` + + // The availability zone from which to launch the server. + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // The names of the security groups to assign to the instance + SecurityGroups []SecurityGroupParam `json:"securityGroups,omitempty"` + + // The name of the secret containing the user data (startup script in most cases) + UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + + // Whether the server instance is created on a trunk port or not. + Trunk bool `json:"trunk,omitempty"` + + // Machine tags + // Requires Nova api 2.52 minimum! + Tags []string `json:"tags,omitempty"` + + // Metadata mapping. Allows you to create a map of key value pairs to add to the server instance. + ServerMetadata map[string]string `json:"serverMetadata,omitempty"` + + // Config Drive support + ConfigDrive *bool `json:"configDrive,omitempty"` + + // The volume metadata to boot from + RootVolume *RootVolume `json:"rootVolume,omitempty"` + + // KubeadmConfiguration holds the kubeadm configuration options + // +optional + KubeadmConfiguration KubeadmConfiguration `json:"kubeadmConfiguration,omitempty"` +} + +// OpenStackMachineStatus defines the observed state of OpenStackMachine +type OpenStackMachineStatus struct { + + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready"` + + // Addresses contains the OpenStack instance associated addresses. + Addresses []corev1.NodeAddress `json:"addresses,omitempty"` + + // InstanceState is the state of the OpenStack instance for this machine. + // +optional + InstanceState *InstanceState `json:"instanceState,omitempty"` + + ErrorReason *errors.MachineStatusError `json:"errorReason,omitempty"` + + // ErrorMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. 
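The machine spec carries over the familiar v1alpha1 provider-spec fields (flavor, image, key name, availability zone, and so on), so a node can be expressed directly as an OpenStackMachine. The flavor, image and key names below are placeholders:

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

// newExampleOpenStackMachine returns a minimal OpenStackMachine; all values are
// illustrative.
func newExampleOpenStackMachine() *infrav1.OpenStackMachine {
	return &infrav1.OpenStackMachine{
		ObjectMeta: metav1.ObjectMeta{Name: "example-node-0", Namespace: "default"},
		Spec: infrav1.OpenStackMachineSpec{
			CloudsSecret:     &corev1.SecretReference{Name: "cloud-config", Namespace: "default"},
			CloudName:        "openstack",
			Flavor:           "m1.medium",
			Image:            "ubuntu-1804-kube-v1.15.0",
			KeyName:          "cluster-api-provider-openstack",
			AvailabilityZone: "nova",
			// Machine tags require Nova API microversion 2.52 or newer.
			Tags: []string{"a-machine-specific-tag"},
		},
	}
}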
+ // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=openstackmachines,scope=Namespaced +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// OpenStackMachine is the Schema for the openstackmachines API +type OpenStackMachine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OpenStackMachineSpec `json:"spec,omitempty"` + Status OpenStackMachineStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// OpenStackMachineList contains a list of OpenStackMachine +type OpenStackMachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OpenStackMachine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OpenStackMachine{}, &OpenStackMachineList{}) +} diff --git a/api/v1alpha2/types.go b/api/v1alpha2/types.go new file mode 100644 index 0000000000..1f5947b4be --- /dev/null +++ b/api/v1alpha2/types.go @@ -0,0 +1,222 @@ +package v1alpha2 + +import ( + kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" +) + +type ExternalRouterIPParam struct { + // The FixedIP in the corresponding subnet + FixedIP string `json:"fixedIP,omitempty"` + // The subnet in which the FixedIP is used for the Gateway of this router + Subnet SubnetParam `json:"subnet"` +} + +// KubeadmConfiguration holds the various configurations that kubeadm uses +type KubeadmConfiguration struct { + // JoinConfiguration is used to customize any kubeadm join configuration + // parameters. + Join kubeadmv1beta1.JoinConfiguration `json:"join,omitempty"` + + // InitConfiguration is used to customize any kubeadm init configuration + // parameters. + Init kubeadmv1beta1.InitConfiguration `json:"init,omitempty"` +} + +type SecurityGroupParam struct { + // Security Group UID + UUID string `json:"uuid,omitempty"` + // Security Group name + Name string `json:"name,omitempty"` + // Filters used to query security groups in openstack + Filter SecurityGroupFilter `json:"filter,omitempty"` +} + +type SecurityGroupFilter struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ProjectID string `json:"projectId,omitempty"` + Limit int `json:"limit,omitempty"` + Marker string `json:"marker,omitempty"` + SortKey string `json:"sortKey,omitempty"` + SortDir string `json:"sortDir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tagsAny,omitempty"` + NotTags string `json:"notTags,omitempty"` + NotTagsAny string `json:"notTagsAny,omitempty"` +} + +type NetworkParam struct { + // The UUID of the network. Required if you omit the port attribute. + UUID string `json:"uuid,omitempty"` + // A fixed IPv4 address for the NIC. 
+ FixedIp string `json:"fixedIp,omitempty"` + // Filters for optional network query + Filter Filter `json:"filter,omitempty"` + // Subnet within a network to use + Subnets []SubnetParam `json:"subnets,omitempty"` +} + +type Filter struct { + Status string `json:"status,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"adminStateUp,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ProjectID string `json:"projectId,omitempty"` + Shared *bool `json:"shared,omitempty"` + ID string `json:"id,omitempty"` + Marker string `json:"marker,omitempty"` + Limit int `json:"limit,omitempty"` + SortKey string `json:"sortKey,omitempty"` + SortDir string `json:"sortDir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tagsAny,omitempty"` + NotTags string `json:"notTags,omitempty"` + NotTagsAny string `json:"notTagsAny,omitempty"` +} + +type SubnetParam struct { + // The UUID of the network. Required if you omit the port attribute. + UUID string `json:"uuid,omitempty"` + + // Filters for optional network query + Filter SubnetFilter `json:"filter,omitempty"` +} + +type SubnetFilter struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + EnableDHCP *bool `json:"enableDhcp,omitempty"` + NetworkID string `json:"networkId,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ProjectID string `json:"projectId,omitempty"` + IPVersion int `json:"ipVersion,omitempty"` + GatewayIP string `json:"gateway_ip,omitempty"` + CIDR string `json:"cidr,omitempty"` + IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` + IPv6RAMode string `json:"ipv6RaMode,omitempty"` + ID string `json:"id,omitempty"` + SubnetPoolID string `json:"subnetpoolId,omitempty"` + Limit int `json:"limit,omitempty"` + Marker string `json:"marker,omitempty"` + SortKey string `json:"sortKey,omitempty"` + SortDir string `json:"sortDir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tagsAny,omitempty"` + NotTags string `json:"notTags,omitempty"` + NotTagsAny string `json:"notTagsAny,omitempty"` +} + +// APIEndpoint represents a reachable Kubernetes API endpoint. +type APIEndpoint struct { + // The hostname on which the API server is serving. + Host string `json:"host"` + + // The port on which the API server is serving. + Port int `json:"port"` +} + +type RootVolume struct { + SourceType string `json:"sourceType,omitempty"` + SourceUUID string `json:"sourceUUID,omitempty"` + DeviceType string `json:"deviceType,omitempty"` + Size int `json:"diskSize,omitempty"` +} + +// KeyPair is how operators can supply custom keypairs for kubeadm to use. +type KeyPair struct { + // base64 encoded cert and key + Cert []byte `json:"cert,omitempty"` + Key []byte `json:"key,omitempty"` +} + +// HasCertAndKey returns whether a keypair contains cert and key of non-zero length. 
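NetworkParam and SubnetParam accept either a UUID or a filter, so machines can attach to networks selected by attributes instead of hard-coded IDs. A short sketch with placeholder values:

package main

import (
	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

// exampleNetworks selects the tenant network by name rather than UUID and
// narrows the attachment to a single subnet matched by CIDR.
func exampleNetworks() []infrav1.NetworkParam {
	return []infrav1.NetworkParam{{
		Filter: infrav1.Filter{Name: "k8s-cluster-net"},
		Subnets: []infrav1.SubnetParam{{
			Filter: infrav1.SubnetFilter{CIDR: "10.6.0.0/24"},
		}},
	}}
}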
+func (kp *KeyPair) HasCertAndKey() bool { + return len(kp.Cert) != 0 && len(kp.Key) != 0 +} + +// Network represents basic information about the associated OpenStach Neutron Network +type Network struct { + Name string `json:"name"` + ID string `json:"id"` + + Subnet *Subnet `json:"subnet,omitempty"` + Router *Router `json:"router,omitempty"` + + // Be careful when using APIServerLoadBalancer, because this field is optional and therefore not + // set in all cases + APIServerLoadBalancer *LoadBalancer `json:"apiServerLoadBalancer,omitempty"` +} + +// Subnet represents basic information about the associated OpenStack Neutron Subnet +type Subnet struct { + Name string `json:"name"` + ID string `json:"id"` + + CIDR string `json:"cidr"` +} + +// Router represents basic information about the associated OpenStack Neutron Router +type Router struct { + Name string `json:"name"` + ID string `json:"id"` +} + +// LoadBalancer represents basic information about the associated OpenStack LoadBalancer +type LoadBalancer struct { + Name string `json:"name"` + ID string `json:"id"` + IP string `json:"ip"` + InternalIP string `json:"internalIP"` +} + +// SecurityGroup represents the basic information of the associated +// OpenStack Neutron Security Group. +type SecurityGroup struct { + Name string `json:"name"` + ID string `json:"id"` + Rules []SecurityGroupRule `json:"rules"` +} + +// SecurityGroupRule represent the basic information of the associated OpenStack +// Security Group Role. +type SecurityGroupRule struct { + ID string `json:"name"` + Direction string `json:"direction"` + EtherType string `json:"etherType"` + SecurityGroupID string `json:"securityGroupID"` + PortRangeMin int `json:"portRangeMin"` + PortRangeMax int `json:"portRangeMax"` + Protocol string `json:"protocol"` + RemoteGroupID string `json:"remoteGroupID"` + RemoteIPPrefix string `json:"remoteIPPrefix"` +} + +// Equal checks if two SecurityGroupRules are the same. +func (r SecurityGroupRule) Equal(x SecurityGroupRule) bool { + return (r.Direction == x.Direction && + r.EtherType == x.EtherType && + r.PortRangeMin == x.PortRangeMin && + r.PortRangeMax == x.PortRangeMax && + r.Protocol == x.Protocol && + r.RemoteGroupID == x.RemoteGroupID && + r.RemoteIPPrefix == x.RemoteIPPrefix) + +} + +// InstanceState describes the state of an OpenStack instance. +type InstanceState string + +var ( + InstanceStateBuilding = InstanceState("BUILDING") + + InstanceStateActive = InstanceState("ACTIVE") + + InstanceStateError = InstanceState("ERROR") + + InstanceStateStopped = InstanceState("STOPPED") + + InstanceStateShutoff = InstanceState("SHUTOFF") +) diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go similarity index 73% rename from pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go rename to api/v1alpha2/zz_generated.deepcopy.go index df3c9eac61..e9e1f3c373 100644 --- a/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -16,20 +16,35 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by main. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. -package v1alpha1 +package v1alpha2 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/cluster-api/pkg/errors" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
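Note that SecurityGroupRule.Equal (defined just above in types.go) compares only the traffic definition, that is direction, ether type, port range, protocol, remote group and remote prefix, and ignores ID and SecurityGroupID, which lets a desired rule be matched against one reported by Neutron. A small illustration with placeholder IDs:

package main

import (
	"fmt"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

func main() {
	desired := infrav1.SecurityGroupRule{
		Direction:      "ingress",
		EtherType:      "IPv4",
		PortRangeMin:   6443,
		PortRangeMax:   6443,
		Protocol:       "tcp",
		RemoteIPPrefix: "0.0.0.0/0",
	}

	// The same traffic definition as it might come back from Neutron, with IDs set.
	observed := desired
	observed.ID = "rule-id-from-neutron"
	observed.SecurityGroupID = "group-id-from-neutron"

	fmt.Println(desired.Equal(observed)) // prints true: IDs are not compared
}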
in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalRouterIPParam) DeepCopyInto(out *ExternalRouterIPParam) { *out = *in in.Subnet.DeepCopyInto(&out.Subnet) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalRouterIPParam. @@ -55,7 +70,6 @@ func (in *Filter) DeepCopyInto(out *Filter) { *out = new(bool) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. @@ -81,7 +95,6 @@ func (in *KeyPair) DeepCopyInto(out *KeyPair) { *out = make([]byte, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyPair. @@ -99,7 +112,6 @@ func (in *KubeadmConfiguration) DeepCopyInto(out *KubeadmConfiguration) { *out = *in in.Join.DeepCopyInto(&out.Join) in.Init.DeepCopyInto(&out.Init) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeadmConfiguration. @@ -115,7 +127,6 @@ func (in *KubeadmConfiguration) DeepCopy() *KubeadmConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. @@ -146,7 +157,6 @@ func (in *Network) DeepCopyInto(out *Network) { *out = new(LoadBalancer) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. @@ -170,7 +180,6 @@ func (in *NetworkParam) DeepCopyInto(out *NetworkParam) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParam. @@ -184,10 +193,67 @@ func (in *NetworkParam) DeepCopy() *NetworkParam { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenstackClusterProviderSpec) DeepCopyInto(out *OpenstackClusterProviderSpec) { +func (in *OpenStackCluster) DeepCopyInto(out *OpenStackCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackCluster. +func (in *OpenStackCluster) DeepCopy() *OpenStackCluster { + if in == nil { + return nil + } + out := new(OpenStackCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenStackClusterList) DeepCopyInto(out *OpenStackClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackClusterList. +func (in *OpenStackClusterList) DeepCopy() *OpenStackClusterList { + if in == nil { + return nil + } + out := new(OpenStackClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenStackClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackClusterSpec) DeepCopyInto(out *OpenStackClusterSpec) { + *out = *in if in.CloudsSecret != nil { in, out := &in.CloudsSecret, &out.CloudsSecret *out = new(v1.SecretReference) @@ -220,32 +286,26 @@ func (in *OpenstackClusterProviderSpec) DeepCopyInto(out *OpenstackClusterProvid in.FrontProxyCAKeyPair.DeepCopyInto(&out.FrontProxyCAKeyPair) in.SAKeyPair.DeepCopyInto(&out.SAKeyPair) in.ClusterConfiguration.DeepCopyInto(&out.ClusterConfiguration) - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackClusterProviderSpec. -func (in *OpenstackClusterProviderSpec) DeepCopy() *OpenstackClusterProviderSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackClusterSpec. +func (in *OpenStackClusterSpec) DeepCopy() *OpenStackClusterSpec { if in == nil { return nil } - out := new(OpenstackClusterProviderSpec) + out := new(OpenStackClusterSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenstackClusterProviderSpec) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenstackClusterProviderStatus) DeepCopyInto(out *OpenstackClusterProviderStatus) { +func (in *OpenStackClusterStatus) DeepCopyInto(out *OpenStackClusterStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make([]APIEndpoint, len(*in)) + copy(*out, *in) + } if in.Network != nil { in, out := &in.Network, &out.Network *out = new(Network) @@ -261,21 +321,39 @@ func (in *OpenstackClusterProviderStatus) DeepCopyInto(out *OpenstackClusterProv *out = new(SecurityGroup) (*in).DeepCopyInto(*out) } - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackClusterProviderStatus. -func (in *OpenstackClusterProviderStatus) DeepCopy() *OpenstackClusterProviderStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackClusterStatus. 
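The generated DeepCopy helpers are what allow a reconciler to mutate a private copy instead of the object shared with the client cache. A sketch of that pattern, assuming a controller-runtime client; the function and parameter names are illustrative, not taken from this patch:

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

// markReady updates status on a deep copy so the original object held by the
// informer cache is never modified in place.
func markReady(ctx context.Context, c client.Client, cluster *infrav1.OpenStackCluster) error {
	patched := cluster.DeepCopy()
	patched.Status.Ready = true
	return c.Status().Update(ctx, patched)
}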
+func (in *OpenStackClusterStatus) DeepCopy() *OpenStackClusterStatus { + if in == nil { + return nil + } + out := new(OpenStackClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackMachine) DeepCopyInto(out *OpenStackMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachine. +func (in *OpenStackMachine) DeepCopy() *OpenStackMachine { if in == nil { return nil } - out := new(OpenstackClusterProviderStatus) + out := new(OpenStackMachine) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenstackClusterProviderStatus) DeepCopyObject() runtime.Object { +func (in *OpenStackMachine) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -283,25 +361,31 @@ func (in *OpenstackClusterProviderStatus) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenstackMachineProviderStatus) DeepCopyInto(out *OpenstackMachineProviderStatus) { +func (in *OpenStackMachineList) DeepCopyInto(out *OpenStackMachineList) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenStackMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackMachineProviderStatus. -func (in *OpenstackMachineProviderStatus) DeepCopy() *OpenstackMachineProviderStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineList. +func (in *OpenStackMachineList) DeepCopy() *OpenStackMachineList { if in == nil { return nil } - out := new(OpenstackMachineProviderStatus) + out := new(OpenStackMachineList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenstackMachineProviderStatus) DeepCopyObject() runtime.Object { +func (in *OpenStackMachineList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -309,10 +393,13 @@ func (in *OpenstackMachineProviderStatus) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { +func (in *OpenStackMachineSpec) DeepCopyInto(out *OpenStackMachineSpec) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } if in.CloudsSecret != nil { in, out := &in.CloudsSecret, &out.CloudsSecret *out = new(v1.SecretReference) @@ -358,31 +445,56 @@ func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { **out = **in } in.KubeadmConfiguration.DeepCopyInto(&out.KubeadmConfiguration) - return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackProviderSpec. -func (in *OpenstackProviderSpec) DeepCopy() *OpenstackProviderSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineSpec. +func (in *OpenStackMachineSpec) DeepCopy() *OpenStackMachineSpec { if in == nil { return nil } - out := new(OpenstackProviderSpec) + out := new(OpenStackMachineSpec) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OpenstackProviderSpec) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackMachineStatus) DeepCopyInto(out *OpenStackMachineStatus) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1.NodeAddress, len(*in)) + copy(*out, *in) } - return nil + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(InstanceState) + **out = **in + } + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackMachineStatus. +func (in *OpenStackMachineStatus) DeepCopy() *OpenStackMachineStatus { + if in == nil { + return nil + } + out := new(OpenStackMachineStatus) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RootVolume) DeepCopyInto(out *RootVolume) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume. @@ -398,7 +510,6 @@ func (in *RootVolume) DeepCopy() *RootVolume { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Router) DeepCopyInto(out *Router) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Router. @@ -419,7 +530,6 @@ func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) { *out = make([]SecurityGroupRule, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup. @@ -435,7 +545,6 @@ func (in *SecurityGroup) DeepCopy() *SecurityGroup { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SecurityGroupFilter) DeepCopyInto(out *SecurityGroupFilter) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupFilter. @@ -452,7 +561,6 @@ func (in *SecurityGroupFilter) DeepCopy() *SecurityGroupFilter { func (in *SecurityGroupParam) DeepCopyInto(out *SecurityGroupParam) { *out = *in out.Filter = in.Filter - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupParam. @@ -468,7 +576,6 @@ func (in *SecurityGroupParam) DeepCopy() *SecurityGroupParam { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecurityGroupRule) DeepCopyInto(out *SecurityGroupRule) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRule. @@ -484,7 +591,6 @@ func (in *SecurityGroupRule) DeepCopy() *SecurityGroupRule { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Subnet) DeepCopyInto(out *Subnet) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet. @@ -505,7 +611,6 @@ func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) { *out = new(bool) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter. @@ -522,7 +627,6 @@ func (in *SubnetFilter) DeepCopy() *SubnetFilter { func (in *SubnetParam) DeepCopyInto(out *SubnetParam) { *out = *in in.Filter.DeepCopyInto(&out.Filter) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParam. diff --git a/cmd/clusterctl/.gitignore b/cmd/clusterctl/.gitignore deleted file mode 100644 index 64933baf87..0000000000 --- a/cmd/clusterctl/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -clusterctl -kubeconfig diff --git a/cmd/clusterctl/Dockerfile b/cmd/clusterctl/Dockerfile deleted file mode 100644 index a0dc7a4558..0000000000 --- a/cmd/clusterctl/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Build the cluster binary -FROM golang:1.12.5 as builder - -# Copy in the go src -WORKDIR /go/src/sigs.k8s.io/cluster-api-provider-openstack -COPY pkg/ pkg/ -COPY cmd/ cmd/ -COPY vendor/ vendor/ - -# Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o clusterctl sigs.k8s.io/cluster-api-provider-openstack/cmd/clusterctl - -# Copy clusterctl into a thin image -FROM debian:stretch-slim -WORKDIR / - -RUN apt-get update && apt-get install -y ca-certificates openssh-client - -COPY --from=builder /go/src/sigs.k8s.io/cluster-api-provider-openstack/clusterctl . 
-ENTRYPOINT ["/clusterctl"] diff --git a/cmd/clusterctl/examples/openstack/cluster.yaml b/cmd/clusterctl/examples/openstack/cluster.yaml deleted file mode 100644 index 4624a9cbc5..0000000000 --- a/cmd/clusterctl/examples/openstack/cluster.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: "cluster.k8s.io/v1alpha1" -kind: Cluster -metadata: - name: test1 -spec: - clusterNetwork: - services: - cidrBlocks: ["10.96.0.0/12"] - pods: - cidrBlocks: ["192.168.0.0/16"] - serviceDomain: "cluster.local" - providerSpec: - value: - apiVersion: "openstackproviderconfig/v1alpha1" - kind: "OpenstackProviderSpec" - tags: - - a_cluster_wide_tag - clusterConfiguration: - controlPlaneEndpoint: :6443 - kubernetesVersion: 1.15.0 - diff --git a/cmd/clusterctl/examples/openstack/machine-deployment.yaml.template b/cmd/clusterctl/examples/openstack/machine-deployment.yaml.template deleted file mode 100644 index 599d09c2db..0000000000 --- a/cmd/clusterctl/examples/openstack/machine-deployment.yaml.template +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: "cluster.k8s.io/v1alpha1" -kind: MachineDeployment -metadata: - name: test1-machinedeployment - labels: - cluster.k8s.io/cluster-name: test1 -spec: - replicas: 1 - selector: - matchLabels: - cluster.k8s.io/cluster-name: test1 - set: node - template: - metadata: - labels: - cluster.k8s.io/cluster-name: test1 - set: node - spec: - providerSpec: - value: - apiVersion: "openstackproviderconfig/v1alpha1" - kind: "OpenstackProviderSpec" - flavor: m1.medium - image: - keyName: cluster-api-provider-openstack - availabilityZone: nova - networks: - - uuid: - securityGroups: - - uuid: - userDataSecret: - name: worker-user-data - namespace: openstack-provider-system - trunk: false - tags: - - a-machine-specific-tag - serverMetadata: - key: value - versions: - kubelet: 1.15.0 diff --git a/cmd/clusterctl/examples/openstack/machines.yaml.template b/cmd/clusterctl/examples/openstack/machines.yaml.template deleted file mode 100644 index 2fd4560bab..0000000000 --- a/cmd/clusterctl/examples/openstack/machines.yaml.template +++ /dev/null @@ -1,70 +0,0 @@ -apiVersion: "cluster.k8s.io/v1alpha1" -kind: MachineList -items: -- apiVersion: "cluster.k8s.io/v1alpha1" - kind: Machine - metadata: - generateName: openstack-master- - labels: - set: master - cluster.k8s.io/cluster-name: test1 - spec: - providerSpec: - value: - apiVersion: "openstackproviderconfig/v1alpha1" - kind: "OpenstackProviderSpec" - flavor: m1.medium - image: - keyName: cluster-api-provider-openstack - availabilityZone: nova - networks: - - uuid: - floatingIP: - rootVolume: - diskSize: 0 - sourceType: "" - SourceUUID: "" - securityGroups: - - uuid: - userDataSecret: - name: master-user-data - namespace: openstack-provider-system - trunk: false - tags: - - a-machine-specific-tag - serverMetadata: - key: value - versions: - kubelet: 1.15.0 - controlPlane: 1.15.0 -- apiVersion: "cluster.k8s.io/v1alpha1" - kind: Machine - metadata: - generateName: openstack-node- - labels: - set: node - cluster.k8s.io/cluster-name: test1 - spec: - providerSpec: - value: - apiVersion: "openstackproviderconfig/v1alpha1" - kind: "OpenstackProviderSpec" - flavor: m1.medium - image: - keyName: cluster-api-provider-openstack - availabilityZone: nova - networks: - - uuid: - floatingIP: - securityGroups: - - uuid: - userDataSecret: - name: worker-user-data - namespace: openstack-provider-system - trunk: false - tags: - - a-machine-specific-tag - serverMetadata: - key: value - versions: - kubelet: 1.15.0 diff --git 
a/cmd/clusterctl/examples/openstack/provider-component/cluster-api/kustomization.yaml b/cmd/clusterctl/examples/openstack/provider-component/cluster-api/kustomization.yaml deleted file mode 100644 index 098285e91d..0000000000 --- a/cmd/clusterctl/examples/openstack/provider-component/cluster-api/kustomization.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -generatorOptions: - disableNameSuffixHash: true - -resources: -- config/rbac/manager_role_binding.yaml -- config/rbac/manager_role.yaml -- config/crds/cluster_v1alpha1_cluster.yaml -- config/crds/cluster_v1alpha1_machine.yaml -- config/crds/cluster_v1alpha1_machineclass.yaml -- config/crds/cluster_v1alpha1_machinedeployment.yaml -- config/crds/cluster_v1alpha1_machineset.yaml -- config/manager/manager.yaml - -patchesStrategicMerge: - - manager_image_patch.yaml diff --git a/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml b/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml deleted file mode 100644 index 5405548e2a..0000000000 --- a/cmd/clusterctl/examples/openstack/provider-component/cluster-api/manager_image_patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - image: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.1.9 - name: manager diff --git a/cmd/manager/Dockerfile b/cmd/manager/Dockerfile deleted file mode 100644 index 62257011a4..0000000000 --- a/cmd/manager/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Build the cluster binary -FROM golang:1.12.5 as builder - -# Copy in the go src -WORKDIR /go/src/sigs.k8s.io/cluster-api-provider-openstack -COPY pkg/ pkg/ -COPY cmd/ cmd/ -COPY vendor/ vendor/ - -# Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager sigs.k8s.io/cluster-api-provider-openstack/cmd/manager - -# Copy manager into a thin image -FROM alpine:latest -WORKDIR / - -RUN apk update && apk add ca-certificates && rm -rf /var/cache/apk/* - -COPY --from=builder /go/src/sigs.k8s.io/cluster-api-provider-openstack/manager . - -CMD ["/manager"] diff --git a/cmd/manager/main.go b/cmd/manager/main.go deleted file mode 100644 index cb65c29fd3..0000000000 --- a/cmd/manager/main.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "flag" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "os" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/cluster" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/machine" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - "time" - - "k8s.io/klog" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis" - _ "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/options" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/record" - clusterapis "sigs.k8s.io/cluster-api/pkg/apis" - capicluster "sigs.k8s.io/cluster-api/pkg/controller/cluster" - capimachine "sigs.k8s.io/cluster-api/pkg/controller/machine" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/runtime/signals" -) - -const ProviderName = "openstack" - -var logFlushFreq = flag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes") - -func main() { - klog.InitFlags(nil) - flag.Set("logtostderr", "true") - watchNamespace := flag.String("namespace", "", "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.") - flag.Parse() - - // The default klog flush interval is 30 seconds, which is frighteningly long. - go wait.Until(klog.Flush, *logFlushFreq, wait.NeverStop) - - // TODO implement with v1alpha2 via: https://github.com/kubernetes-sigs/controller-runtime/pull/489 - cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: os.Getenv("KUBECONFIG")}, - &clientcmd.ConfigOverrides{CurrentContext: os.Getenv("KUBECONTEXT")}).ClientConfig() - if err != nil { - klog.Fatal(err) - } - - // Setup a Manager - syncPeriod := 10 * time.Minute - opts := manager.Options{ - SyncPeriod: &syncPeriod, - } - - if *watchNamespace != "" { - opts.Namespace = *watchNamespace - klog.Infof("Watching cluster-api objects only in namespace %q for reconciliation.", opts.Namespace) - } - - // Create a new Cmd to provide shared dependencies and start components - mgr, err := manager.New(cfg, opts) - if err != nil { - klog.Fatal(err) - } - - cs, err := clientset.NewForConfig(cfg) - if err != nil { - klog.Fatalf("Could not create clientset to talk to the apiserver: %v", err) - } - - kubeClient, err := kubernetes.NewForConfig(cfg) - if err != nil { - klog.Fatalf("Could not create kubernetes client to talk to the apiserver: %v", err) - } - - // Initialize event recorder. - record.InitFromRecorder(mgr.GetRecorder("openstack-controller")) - - // Initialize cluster actuator. 
- clusterActuator := cluster.NewActuator(cluster.ActuatorParams{ - Client: mgr.GetClient(), - KubeClient: kubeClient, - ClusterClient: cs.ClusterV1alpha1(), - Scheme: mgr.GetScheme(), - EventRecorder: mgr.GetRecorder("openstack-controller"), - }) - - machineActuator := machine.NewActuator(machine.ActuatorParams{ - Client: mgr.GetClient(), - KubeClient: kubeClient, - ClusterClient: cs.ClusterV1alpha1(), - Scheme: mgr.GetScheme(), - EventRecorder: mgr.GetRecorder("openstack-controller"), - }) - - // Register our cluster deployer (the interface is in clusterctl and we define the Deployer interface on the actuator) - common.RegisterClusterProvisioner(ProviderName, clusterActuator) - - // Setup Scheme for all resources - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { - klog.Fatal(err) - } - - if err := clusterapis.AddToScheme(mgr.GetScheme()); err != nil { - klog.Fatal(err) - } - - if err := capimachine.AddWithActuator(mgr, machineActuator); err != nil { - klog.Fatal(err) - } - if err := capicluster.AddWithActuator(mgr, clusterActuator); err != nil { - klog.Fatal(err) - } - - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - klog.Fatalf("Failed to run manager: %v", err) - } -} diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..02bdc1bfff --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,24 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + commonName: $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..bebea5a595 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..55c1246dde --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: certmanager.k8s.io + fieldSpecs: + - kind: Certificate + group: certmanager.k8s.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: certmanager.k8s.io + path: spec/commonName +- kind: Certificate + group: certmanager.k8s.io + path: spec/dnsNames diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml new file mode 100644 index 0000000000..35d7ca98d1 --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml @@ -0,0 +1,732 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: 
CustomResourceDefinition +metadata: + creationTimestamp: null + name: openstackclusters.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + kind: OpenStackCluster + plural: openstackclusters + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: OpenStackCluster is the Schema for the openstackclusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OpenStackClusterSpec defines the desired state of OpenStackCluster + properties: + apiServerLoadBalancerAdditionalPorts: + description: APIServerLoadBalancerAdditionalPorts adds additional ports + to the APIServerLoadBalancer + items: + type: integer + type: array + apiServerLoadBalancerFloatingIP: + description: APIServerLoadBalancerFloatingIP is the floatingIP which + will be associated to the APIServer loadbalancer. The floatingIP will + be created if it not already exists. + type: string + apiServerLoadBalancerPort: + description: APIServerLoadBalancerPort is the port on which the listener + on the APIServer loadbalancer will be created + type: integer + caKeyPair: + description: CAKeyPair is the key pair for ca certs. + properties: + cert: + description: base64 encoded cert and key + format: byte + type: string + key: + format: byte + type: string + type: object + cloudName: + description: The name of the cloud to use from the clouds secret + type: string + cloudsSecret: + description: The name of the secret containing the openstack credentials + properties: + name: + description: Name is unique within a namespace to reference a secret + resource. + type: string + namespace: + description: Namespace defines the space within which the secret + name must be unique. + type: string + type: object + clusterConfiguration: + description: ClusterConfiguration holds the cluster-wide information + used during a kubeadm init call. + properties: + apiServer: + description: APIServer contains extra settings for the API server + control plane component + properties: + certSANs: + description: CertSANs sets extra Subject Alternative Names for + the API Server signing cert. + items: + type: string + type: array + extraArgs: + additionalProperties: + type: string + description: 'ExtraArgs is an extra set of flags to pass to + the control plane component. TODO: This is temporary and ideally + we would like to switch all components to use ComponentConfig + + ConfigMaps.' + type: object + extraVolumes: + description: ExtraVolumes is an extra set of host volumes, mounted + to the control plane component. + items: + description: HostPathMount contains elements describing volumes + that are mounted from the host. + properties: + hostPath: + description: HostPath is the path in the host that will + be mounted inside the pod. + type: string + mountPath: + description: MountPath is the path inside the pod where + hostPath will be mounted. 
+ type: string + name: + description: Name of the volume inside the pod template. + type: string + pathType: + description: PathType is the type of the HostPath. + type: string + readOnly: + description: ReadOnly controls write access to the volume + type: boolean + required: + - hostPath + - mountPath + - name + type: object + type: array + timeoutForControlPlane: + description: TimeoutForControlPlane controls the timeout that + we use for API server to appear + type: string + type: object + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More + info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + certificatesDir: + description: CertificatesDir specifies where to store or look for + all required certificates. + type: string + clusterName: + description: The cluster name + type: string + controlPlaneEndpoint: + description: 'ControlPlaneEndpoint sets a stable IP address or DNS + name for the control plane; it can be a valid IP address or a + RFC-1123 DNS subdomain, both with optional TCP port. In case the + ControlPlaneEndpoint is not specified, the AdvertiseAddress + + BindPort are used; in case the ControlPlaneEndpoint is specified + but without a TCP port, the BindPort is used. Possible usages + are: e.g. In a cluster with more than one control plane instances, + this field should be assigned the address of the external load + balancer in front of the control plane instances. e.g. in environments + with enforced node recycling, the ControlPlaneEndpoint could be + used for assigning a stable DNS to the control plane.' + type: string + controllerManager: + description: ControllerManager contains extra settings for the controller + manager control plane component + properties: + extraArgs: + additionalProperties: + type: string + description: 'ExtraArgs is an extra set of flags to pass to + the control plane component. TODO: This is temporary and ideally + we would like to switch all components to use ComponentConfig + + ConfigMaps.' + type: object + extraVolumes: + description: ExtraVolumes is an extra set of host volumes, mounted + to the control plane component. + items: + description: HostPathMount contains elements describing volumes + that are mounted from the host. + properties: + hostPath: + description: HostPath is the path in the host that will + be mounted inside the pod. + type: string + mountPath: + description: MountPath is the path inside the pod where + hostPath will be mounted. + type: string + name: + description: Name of the volume inside the pod template. + type: string + pathType: + description: PathType is the type of the HostPath. + type: string + readOnly: + description: ReadOnly controls write access to the volume + type: boolean + required: + - hostPath + - mountPath + - name + type: object + type: array + type: object + dns: + description: DNS defines the options for the DNS add-on installed + in the cluster. + properties: + imageRepository: + description: ImageRepository sets the container registry to + pull images from. if not set, the ImageRepository defined + in ClusterConfiguration will be used instead. + type: string + imageTag: + description: ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically + the version of the above components during upgrades. 
+ type: string + type: + description: Type defines the DNS add-on to be used + type: string + required: + - type + type: object + etcd: + description: Etcd holds configuration for etcd. + properties: + external: + description: External describes how to connect to an external + etcd cluster Local and External are mutually exclusive + properties: + caFile: + description: CAFile is an SSL Certificate Authority file + used to secure etcd communication. Required if using a + TLS connection. + type: string + certFile: + description: CertFile is an SSL certification file used + to secure etcd communication. Required if using a TLS + connection. + type: string + endpoints: + description: Endpoints of etcd members. Required for ExternalEtcd. + items: + type: string + type: array + keyFile: + description: KeyFile is an SSL key file used to secure etcd + communication. Required if using a TLS connection. + type: string + required: + - caFile + - certFile + - endpoints + - keyFile + type: object + local: + description: Local provides configuration knobs for configuring + the local etcd instance Local and External are mutually exclusive + properties: + dataDir: + description: DataDir is the directory etcd will place its + data. Defaults to "/var/lib/etcd". + type: string + extraArgs: + additionalProperties: + type: string + description: ExtraArgs are extra arguments provided to the + etcd binary when run inside a static pod. + type: object + imageRepository: + description: ImageRepository sets the container registry + to pull images from. if not set, the ImageRepository defined + in ClusterConfiguration will be used instead. + type: string + imageTag: + description: ImageTag allows to specify a tag for the image. + In case this value is set, kubeadm does not change automatically + the version of the above components during upgrades. + type: string + peerCertSANs: + description: PeerCertSANs sets extra Subject Alternative + Names for the etcd peer signing cert. + items: + type: string + type: array + serverCertSANs: + description: ServerCertSANs sets extra Subject Alternative + Names for the etcd server signing cert. + items: + type: string + type: array + required: + - dataDir + type: object + type: object + featureGates: + additionalProperties: + type: boolean + description: FeatureGates enabled by the user. + type: object + imageRepository: + description: ImageRepository sets the container registry to pull + images from. If empty, `k8s.gcr.io` will be used by default; in + case of kubernetes version is a CI build (kubernetes version starts + with `ci/` or `ci-cross/`) `gcr.io/kubernetes-ci-images` will + be used as a default for control plane components and for kube-proxy, + while `k8s.gcr.io` will be used for all the other images. + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + kubernetesVersion: + description: KubernetesVersion is the target version of the control + plane. + type: string + networking: + description: Networking holds configuration for the networking topology + of the cluster. + properties: + dnsDomain: + description: DNSDomain is the dns domain used by k8s services. + Defaults to "cluster.local". + type: string + podSubnet: + description: PodSubnet is the subnet used by pods. 
+ type: string + serviceSubnet: + description: ServiceSubnet is the subnet used by k8s services. + Defaults to "10.96.0.0/12". + type: string + required: + - dnsDomain + - podSubnet + - serviceSubnet + type: object + scheduler: + description: Scheduler contains extra settings for the scheduler + control plane component + properties: + extraArgs: + additionalProperties: + type: string + description: 'ExtraArgs is an extra set of flags to pass to + the control plane component. TODO: This is temporary and ideally + we would like to switch all components to use ComponentConfig + + ConfigMaps.' + type: object + extraVolumes: + description: ExtraVolumes is an extra set of host volumes, mounted + to the control plane component. + items: + description: HostPathMount contains elements describing volumes + that are mounted from the host. + properties: + hostPath: + description: HostPath is the path in the host that will + be mounted inside the pod. + type: string + mountPath: + description: MountPath is the path inside the pod where + hostPath will be mounted. + type: string + name: + description: Name of the volume inside the pod template. + type: string + pathType: + description: PathType is the type of the HostPath. + type: string + readOnly: + description: ReadOnly controls write access to the volume + type: boolean + required: + - hostPath + - mountPath + - name + type: object + type: array + type: object + useHyperKubeImage: + description: UseHyperKubeImage controls if hyperkube should be used + for Kubernetes components instead of their respective separate + images + type: boolean + required: + - certificatesDir + - controlPlaneEndpoint + - dns + - etcd + - imageRepository + - kubernetesVersion + - networking + type: object + disablePortSecurity: + description: DisablePortSecurity disables the port security of the network + created for the Kubernetes cluster, which also disables SecurityGroups + type: boolean + disableServerTags: + description: 'Default: True. In case of server tag errors, set to False' + type: boolean + dnsNameservers: + description: DNSNameservers is the list of nameservers for OpenStack + Subnet being created. + items: + type: string + type: array + etcdCAKeyPair: + description: EtcdCAKeyPair is the key pair for etcd. + properties: + cert: + description: base64 encoded cert and key + format: byte + type: string + key: + format: byte + type: string + type: object + externalNetworkId: + description: ExternalNetworkID is the ID of an external OpenStack Network. + This is necessary to get public internet to the VMs. + type: string + externalRouterIPs: + description: ExternalRouterIPs is an array of externalIPs on the respective + subnets. This is necessary if the router needs a fixed ip in a specific + subnet. 
+ items: + properties: + fixedIP: + description: The FixedIP in the corresponding subnet + type: string + subnet: + description: The subnet in which the FixedIP is used for the Gateway + of this router + properties: + filter: + description: Filters for optional network query + properties: + cidr: + type: string + description: + type: string + enableDhcp: + type: boolean + gateway_ip: + type: string + id: + type: string + ipVersion: + type: integer + ipv6AddressMode: + type: string + ipv6RaMode: + type: string + limit: + type: integer + marker: + type: string + name: + type: string + networkId: + type: string + notTags: + type: string + notTagsAny: + type: string + projectId: + type: string + sortDir: + type: string + sortKey: + type: string + subnetpoolId: + type: string + tags: + type: string + tagsAny: + type: string + tenantId: + type: string + type: object + uuid: + description: The UUID of the network. Required if you omit + the port attribute. + type: string + type: object + required: + - subnet + type: object + type: array + frontProxyCAKeyPair: + description: FrontProxyCAKeyPair is the key pair for FrontProxyKeyPair. + properties: + cert: + description: base64 encoded cert and key + format: byte + type: string + key: + format: byte + type: string + type: object + managedAPIServerLoadBalancer: + description: 'ManagedAPIServerLoadBalancer defines whether a LoadBalancer + for the APIServer should be created. If set to true the following + properties are mandatory: APIServerLoadBalancerFloatingIP, APIServerLoadBalancerPort' + type: boolean + managedSecurityGroups: + description: ManagedSecurityGroups defines that kubernetes manages the + OpenStack security groups for now; that means that we'll create two + security groups, one allowing SSH and API access from everywhere, + and another one that allows all traffic to/from machines belonging + to that group. In the future, we could make this more flexible. + type: boolean + nodeCidr: + description: NodeCIDR is the OpenStack Subnet to be created. Cluster + actuator will create a network, a subnet with NodeCIDR, and a router + connected to this subnet. If you leave this empty, no network will + be created. + type: string + saKeyPair: + description: SAKeyPair is the service account key pair. + properties: + cert: + description: base64 encoded cert and key + format: byte + type: string + key: + format: byte + type: string + type: object + tags: + description: Tags for all resources in cluster + items: + type: string + type: array + useOctavia: + description: UseOctavia defines whether the LoadBalancer Service is Octavia or + not + type: boolean + type: object + status: + description: OpenStackClusterStatus defines the observed state of OpenStackCluster + properties: + apiEndpoints: + description: APIEndpoints represents the endpoints to communicate with + the control plane. + items: + description: APIEndpoint represents a reachable Kubernetes API endpoint. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + type: integer + required: + - host + - port + type: object + type: array + controlPlaneSecurityGroup: + description: 'ControlPlaneSecurityGroups contains all the information + about the OpenStack Security Group that needs to be applied to control + plane nodes. TODO: Maybe instead of two properties, we add a property + to the group?'
+ properties: + id: + type: string + name: + type: string + rules: + items: + description: SecurityGroupRule represent the basic information + of the associated OpenStack Security Group Role. + properties: + direction: + type: string + etherType: + type: string + name: + type: string + portRangeMax: + type: integer + portRangeMin: + type: integer + protocol: + type: string + remoteGroupID: + type: string + remoteIPPrefix: + type: string + securityGroupID: + type: string + required: + - direction + - etherType + - name + - portRangeMax + - portRangeMin + - protocol + - remoteGroupID + - remoteIPPrefix + - securityGroupID + type: object + type: array + required: + - id + - name + - rules + type: object + globalSecurityGroup: + description: GlobalSecurityGroup contains all the information about + the OpenStack Security Group that needs to be applied to all nodes, + both control plane and worker nodes. + properties: + id: + type: string + name: + type: string + rules: + items: + description: SecurityGroupRule represent the basic information + of the associated OpenStack Security Group Role. + properties: + direction: + type: string + etherType: + type: string + name: + type: string + portRangeMax: + type: integer + portRangeMin: + type: integer + protocol: + type: string + remoteGroupID: + type: string + remoteIPPrefix: + type: string + securityGroupID: + type: string + required: + - direction + - etherType + - name + - portRangeMax + - portRangeMin + - protocol + - remoteGroupID + - remoteIPPrefix + - securityGroupID + type: object + type: array + required: + - id + - name + - rules + type: object + network: + description: Network contains all information about the created OpenStack + Network. It includes Subnets and Router. + properties: + apiServerLoadBalancer: + description: Be careful when using APIServerLoadBalancer, because + this field is optional and therefore not set in all cases + properties: + id: + type: string + internalIP: + type: string + ip: + type: string + name: + type: string + required: + - id + - internalIP + - ip + - name + type: object + id: + type: string + name: + type: string + router: + description: Router represents basic information about the associated + OpenStack Neutron Router + properties: + id: + type: string + name: + type: string + required: + - id + - name + type: object + subnet: + description: Subnet represents basic information about the associated + OpenStack Neutron Subnet + properties: + cidr: + type: string + id: + type: string + name: + type: string + required: + - cidr + - id + - name + type: object + required: + - id + - name + type: object + ready: + type: boolean + required: + - ready + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml new file mode 100644 index 0000000000..6e84b2821d --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml @@ -0,0 +1,676 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: openstackmachines.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + kind: OpenStackMachine + plural: openstackmachines + scope: Namespaced + subresources: + status: {} + validation: + 
openAPIV3Schema: + description: OpenStackMachine is the Schema for the openstackmachines API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OpenStackMachineSpec defines the desired state of OpenStackMachine + properties: + availabilityZone: + description: The availability zone from which to launch the server. + type: string + cloudName: + description: The name of the cloud to use from the clouds secret + type: string + cloudsSecret: + description: The name of the secret containing the openstack credentials + properties: + name: + description: Name is unique within a namespace to reference a secret + resource. + type: string + namespace: + description: Namespace defines the space within which the secret + name must be unique. + type: string + type: object + configDrive: + description: Config Drive support + type: boolean + flavor: + description: The flavor reference for the flavor for your server instance. + type: string + floatingIP: + description: The floatingIP which will be associated to the machine, + only used for master. The floatingIP should already have been created + and must not yet be associated. + type: string + image: + description: The name of the image to use for your server instance. + If the RootVolume is specified, this will be ignored and the rootVolume + will be used directly. + type: string + keyName: + description: The ssh key to inject in the instance + type: string + kubeadmConfiguration: + description: KubeadmConfiguration holds the kubeadm configuration options + properties: + init: + description: InitConfiguration is used to customize any kubeadm + init configuration parameters. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + bootstrapTokens: + description: '`kubeadm init`-only information. These fields + are solely used the first time `kubeadm init` runs. After + that, the information in the fields IS NOT uploaded to the + `kubeadm-config` ConfigMap that is used by `kubeadm upgrade` + for instance. These fields must be omitempty. BootstrapTokens + is respected at `kubeadm init` time and describes a set of + Bootstrap Tokens to create. This information IS NOT uploaded + to the kubeadm cluster configmap, partly because of its sensitive + nature' + items: + description: BootstrapToken describes one bootstrap token, + stored as a Secret in the cluster + properties: + description: + description: Description sets a human-friendly message + why this token exists and what it's used for, so other + administrators can know its purpose. + type: string + expires: + description: Expires specifies the timestamp when this + token expires.
Defaults to being set dynamically at + runtime based on the TTL. Expires and TTL are mutually + exclusive. + format: date-time + type: string + groups: + description: Groups specifies the extra groups that this + token will authenticate as when/if used for authentication + items: + type: string + type: array + token: + description: Token is used for establishing bidirectional + trust between nodes and control-planes. Used for joining + nodes in the cluster. + type: object + ttl: + description: TTL defines the time to live for this token. + Defaults to 24h. Expires and TTL are mutually exclusive. + type: string + usages: + description: Usages describes the ways in which this token + can be used. Can by default be used for establishing + bidirectional trust, but that can be changed here. + items: + type: string + type: array + required: + - token + type: object + type: array + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + localAPIEndpoint: + description: LocalAPIEndpoint represents the endpoint of the + API server instance that's deployed on this control plane + node In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint + in the sense that ControlPlaneEndpoint is the global endpoint + for the cluster, which then loadbalances the requests to each + individual API server. This configuration object lets you + customize what IP/DNS name and port the local API server advertises + it's accessible on. By default, kubeadm tries to auto-detect + the IP of the default interface and use that, but in case + that process fails you may set the desired value here. + properties: + advertiseAddress: + description: AdvertiseAddress sets the IP address for the + API server to advertise. + type: string + bindPort: + description: BindPort sets the secure port for the API Server + to bind to. Defaults to 6443. + format: int32 + type: integer + required: + - advertiseAddress + - bindPort + type: object + nodeRegistration: + description: NodeRegistration holds fields that relate to registering + the new control-plane node to the cluster + properties: + criSocket: + description: CRISocket is used to retrieve container runtime + info. This information will be annotated to the Node API + object, for later re-use + type: string + kubeletExtraArgs: + additionalProperties: + type: string + description: KubeletExtraArgs passes through extra arguments + to the kubelet. The arguments here are passed to the kubelet + command line via the environment file kubeadm writes at + runtime for the kubelet to source. This overrides the + generic base-level configuration in the kubelet-config-1.X + ConfigMap Flags have higher priority when parsing. These + values are local and specific to the node kubeadm is executing + on. + type: object + name: + description: Name is the `.Metadata.Name` field of the Node + API object that will be created in this `kubeadm init` + or `kubeadm join` operation. This field is also used in + the CommonName field of the kubelet's client certificate + to the API server. Defaults to the hostname of the node + if not provided. + type: string + taints: + description: 'Taints specifies the taints the Node API object + should be registered with. If this field is unset, i.e. 
+ nil, in the `kubeadm init` process it will be defaulted + to []v1.Taint{''node-role.kubernetes.io/master=""''}. + If you don''t want to taint your control-plane node, set + this field to an empty slice, i.e. `taints: {}` in the + YAML file. This field is solely used for Node registration.' + items: + description: The node this Taint is attached to has the + "effect" on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on + pods that do not tolerate the taint. Valid effects + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied + to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which + the taint was added. It is only written for NoExecute + taints. + format: date-time + type: string + value: + description: Required. The taint value corresponding + to the taint key. + type: string + required: + - effect + - key + type: object + type: array + type: object + type: object + join: + description: JoinConfiguration is used to customize any kubeadm + join configuration parameters. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + caCertPath: + description: CACertPath is the path to the SSL certificate authority + used to secure communications between node and control-plane. + Defaults to "/etc/kubernetes/pki/ca.crt". + type: string + controlPlane: + description: ControlPlane defines the additional control plane + instance to be deployed on the joining node. If nil, no additional + control plane instance will be deployed. + properties: + localAPIEndpoint: + description: LocalAPIEndpoint represents the endpoint of + the API server instance to be deployed on this node. + properties: + advertiseAddress: + description: AdvertiseAddress sets the IP address for + the API server to advertise. + type: string + bindPort: + description: BindPort sets the secure port for the API + Server to bind to. Defaults to 6443. + format: int32 + type: integer + required: + - advertiseAddress + - bindPort + type: object + type: object + discovery: + description: Discovery specifies the options for the kubelet + to use during the TLS Bootstrap process + properties: + bootstrapToken: + description: BootstrapToken is used to set the options for + bootstrap token based discovery. BootstrapToken and File + are mutually exclusive + properties: + apiServerEndpoint: + description: APIServerEndpoint is an IP or domain name + to the API server from which info will be fetched. + type: string + caCertHashes: + description: 'CACertHashes specifies a set of public + key pins to verify when token-based discovery is used. + The root CA found during discovery must match one + of these values. Specifying an empty set disables + root CA pinning, which can be unsafe. Each hash is + specified as "<type>:<value>", where the only currently + supported type is "sha256". This is a hex-encoded + SHA-256 hash of the Subject Public Key Info (SPKI) + object in DER-encoded ASN.1.
These hashes can be calculated + using, for example, OpenSSL: openssl x509 -pubkey + -in ca.crt openssl rsa -pubin -outform der 2>&/dev/null + | openssl dgst -sha256 -hex' + items: + type: string + type: array + token: + description: Token is a token used to validate cluster + information fetched from the control-plane. + type: string + unsafeSkipCAVerification: + description: UnsafeSkipCAVerification allows token-based + discovery without CA verification via CACertHashes. + This can weaken the security of kubeadm since other + nodes can impersonate the control-plane. + type: boolean + required: + - token + - unsafeSkipCAVerification + type: object + file: + description: File is used to specify a file or URL to a + kubeconfig file from which to load cluster information + BootstrapToken and File are mutually exclusive + properties: + kubeConfigPath: + description: KubeConfigPath is used to specify the actual + file path or URL to the kubeconfig file from which + to load cluster information + type: string + required: + - kubeConfigPath + type: object + timeout: + description: Timeout modifies the discovery timeout + type: string + tlsBootstrapToken: + description: TLSBootstrapToken is a token used for TLS bootstrapping. + If .BootstrapToken is set, this field is defaulted to + .BootstrapToken.Token, but can be overridden. If .File + is set, this field **must be set** in case the KubeConfigFile + does not contain any other authentication information + type: string + required: + - tlsBootstrapToken + type: object + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + nodeRegistration: + description: NodeRegistration holds fields that relate to registering + the new control-plane node to the cluster + properties: + criSocket: + description: CRISocket is used to retrieve container runtime + info. This information will be annotated to the Node API + object, for later re-use + type: string + kubeletExtraArgs: + additionalProperties: + type: string + description: KubeletExtraArgs passes through extra arguments + to the kubelet. The arguments here are passed to the kubelet + command line via the environment file kubeadm writes at + runtime for the kubelet to source. This overrides the + generic base-level configuration in the kubelet-config-1.X + ConfigMap Flags have higher priority when parsing. These + values are local and specific to the node kubeadm is executing + on. + type: object + name: + description: Name is the `.Metadata.Name` field of the Node + API object that will be created in this `kubeadm init` + or `kubeadm join` operation. This field is also used in + the CommonName field of the kubelet's client certificate + to the API server. Defaults to the hostname of the node + if not provided. + type: string + taints: + description: 'Taints specifies the taints the Node API object + should be registered with. If this field is unset, i.e. + nil, in the `kubeadm init` process it will be defaulted + to []v1.Taint{''node-role.kubernetes.io/master=""''}. + If you don''t want to taint your control-plane node, set + this field to an empty slice, i.e. `taints: {}` in the + YAML file. This field is solely used for Node registration.' 
+ items: + description: The node this Taint is attached to has the + "effect" on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on + pods that do not tolerate the taint. Valid effects + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied + to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which + the taint was added. It is only written for NoExecute + taints. + format: date-time + type: string + value: + description: Required. The taint value corresponding + to the taint key. + type: string + required: + - effect + - key + type: object + type: array + type: object + required: + - caCertPath + - discovery + - nodeRegistration + type: object + type: object + networks: + description: A networks object. Required parameter when there are multiple + networks defined for the tenant. When you do not specify the networks + parameter, the server attaches to the only network created for the + current tenant. + items: + properties: + filter: + description: Filters for optional network query + properties: + adminStateUp: + type: boolean + description: + type: string + id: + type: string + limit: + type: integer + marker: + type: string + name: + type: string + notTags: + type: string + notTagsAny: + type: string + projectId: + type: string + shared: + type: boolean + sortDir: + type: string + sortKey: + type: string + status: + type: string + tags: + type: string + tagsAny: + type: string + tenantId: + type: string + type: object + fixedIp: + description: A fixed IPv4 address for the NIC. + type: string + subnets: + description: Subnet within a network to use + items: + properties: + filter: + description: Filters for optional network query + properties: + cidr: + type: string + description: + type: string + enableDhcp: + type: boolean + gateway_ip: + type: string + id: + type: string + ipVersion: + type: integer + ipv6AddressMode: + type: string + ipv6RaMode: + type: string + limit: + type: integer + marker: + type: string + name: + type: string + networkId: + type: string + notTags: + type: string + notTagsAny: + type: string + projectId: + type: string + sortDir: + type: string + sortKey: + type: string + subnetpoolId: + type: string + tags: + type: string + tagsAny: + type: string + tenantId: + type: string + type: object + uuid: + description: The UUID of the network. Required if you omit + the port attribute. + type: string + type: object + type: array + uuid: + description: The UUID of the network. Required if you omit the + port attribute. + type: string + type: object + type: array + providerID: + description: ProviderID is the unique identifier as specified by the + cloud provider. 
+ type: string + rootVolume: + description: The volume metadata to boot from + properties: + deviceType: + type: string + diskSize: + type: integer + sourceType: + type: string + sourceUUID: + type: string + type: object + securityGroups: + description: The names of the security groups to assign to the instance + items: + properties: + filter: + description: Filters used to query security groups in openstack + properties: + description: + type: string + id: + type: string + limit: + type: integer + marker: + type: string + name: + type: string + notTags: + type: string + notTagsAny: + type: string + projectId: + type: string + sortDir: + type: string + sortKey: + type: string + tags: + type: string + tagsAny: + type: string + tenantId: + type: string + type: object + name: + description: Security Group name + type: string + uuid: + description: Security Group UID + type: string + type: object + type: array + serverMetadata: + additionalProperties: + type: string + description: Metadata mapping. Allows you to create a map of key value + pairs to add to the server instance. + type: object + tags: + description: Machine tags Requires Nova api 2.52 minimum! + items: + type: string + type: array + trunk: + description: Whether the server instance is created on a trunk port + or not. + type: boolean + userDataSecret: + description: The name of the secret containing the user data (startup + script in most cases) + properties: + name: + description: Name is unique within a namespace to reference a secret + resource. + type: string + namespace: + description: Namespace defines the space within which the secret + name must be unique. + type: string + type: object + required: + - flavor + - image + type: object + status: + description: OpenStackMachineStatus defines the observed state of OpenStackMachine + properties: + addresses: + description: Addresses contains the OpenStack instance associated addresses. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP or + InternalIP. + type: string + required: + - address + - type + type: object + type: array + errorMessage: + description: "ErrorMessage will be set in the event that there is a + terminal problem reconciling the Machine and will contain a more verbose + string suitable for logging and human consumption. \n This field should + not be set for transitive errors that a controller faces that are + expected to be fixed automatically over time (like service outages), + but instead indicate that something is fundamentally wrong with the + Machine's spec or the configuration of the controller, and that manual + intervention is required. Examples of terminal errors would be invalid + combinations of settings in the spec, values that are unsupported + by the controller, or the responsible controller itself being critically + misconfigured. \n Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or logged + in the controller's output." + type: string + errorReason: + description: Constants aren't automatically generated for unversioned + packages. Instead share the same constant for all versioned packages + type: string + instanceState: + description: InstanceState is the state of the OpenStack instance for + this machine. + type: string + ready: + description: Ready is true when the provider resource is ready. 
+ type: boolean + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 0000000000..f74481cf3c --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,24 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml +- bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_openstackclusters.yaml +#- patches/webhook_in_openstackmachines.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_openstackclusters.yaml +#- patches/cainjection_in_openstackmachines.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000000..6f83d9a94b --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,17 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_openstackclusters.yaml b/config/crd/patches/cainjection_in_openstackclusters.yaml new file mode 100644 index 0000000000..bc88da6cf0 --- /dev/null +++ b/config/crd/patches/cainjection_in_openstackclusters.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: openstackclusters.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_openstackmachines.yaml b/config/crd/patches/cainjection_in_openstackmachines.yaml new file mode 100644 index 0000000000..f9267836f0 --- /dev/null +++ b/config/crd/patches/cainjection_in_openstackmachines.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
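+# Note: $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) below are kustomize variable
+# references; they are expected to be substituted by the config/default overlay
+# (annotation substitution is enabled via varReference in config/crd/kustomizeconfig.yaml).
+# After substitution the annotation should resolve to a <namespace>/<certificate-name>
+# reference, for example (illustrative values only):
+#   certmanager.k8s.io/inject-ca-from: system/serving-cert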
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: openstackmachines.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/webhook_in_openstackclusters.yaml b/config/crd/patches/webhook_in_openstackclusters.yaml new file mode 100644 index 0000000000..ff50d1d807 --- /dev/null +++ b/config/crd/patches/webhook_in_openstackclusters.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: openstackclusters.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crd/patches/webhook_in_openstackmachines.yaml b/config/crd/patches/webhook_in_openstackmachines.yaml new file mode 100644 index 0000000000..7698ad5ac5 --- /dev/null +++ b/config/crd/patches/webhook_in_openstackmachines.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: openstackmachines.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderspec.yaml b/config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderspec.yaml deleted file mode 100644 index 45ffb2d75d..0000000000 --- a/config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderspec.yaml +++ /dev/null @@ -1,513 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: openstackclusterproviderspecs.openstackproviderconfig.k8s.io -spec: - group: openstackproviderconfig.k8s.io - names: - kind: OpenstackClusterProviderSpec - plural: openstackclusterproviderspecs - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiServerLoadBalancerAdditionalPorts: - description: APIServerLoadBalancerAdditionalPorts adds additional ports - to the APIServerLoadBalancer - items: - format: int64 - type: integer - type: array - apiServerLoadBalancerFloatingIP: - description: APIServerLoadBalancerFloatingIP is the floatingIP which will - be associated to the APIServer loadbalancer. The floatingIP will be created - if it not already exists. - type: string - apiServerLoadBalancerPort: - description: APIServerLoadBalancerPort is the port on which the listener - on the APIServer loadbalancer will be created - format: int64 - type: integer - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - caKeyPair: - description: CAKeyPair is the key pair for ca certs. - properties: - cert: - description: base64 encoded cert and key - format: byte - type: string - key: - format: byte - type: string - type: object - cloudName: - description: The name of the cloud to use from the clouds secret - type: string - cloudsSecret: - description: The name of the secret containing the openstack credentials - type: object - clusterConfiguration: - description: ClusterConfiguration holds the cluster-wide information used - during a kubeadm init call. - properties: - apiServer: - description: APIServer contains extra settings for the API server control - plane component - properties: - certSANs: - description: CertSANs sets extra Subject Alternative Names for the - API Server signing cert. - items: - type: string - type: array - extraArgs: - description: 'ExtraArgs is an extra set of flags to pass to the - control plane component. TODO: This is temporary and ideally we - would like to switch all components to use ComponentConfig + ConfigMaps.' - type: object - extraVolumes: - description: ExtraVolumes is an extra set of host volumes, mounted - to the control plane component. - items: - properties: - hostPath: - description: HostPath is the path in the host that will be - mounted inside the pod. - type: string - mountPath: - description: MountPath is the path inside the pod where hostPath - will be mounted. - type: string - name: - description: Name of the volume inside the pod template. - type: string - pathType: - description: PathType is the type of the HostPath. - type: string - readOnly: - description: ReadOnly controls write access to the volume - type: boolean - required: - - name - - hostPath - - mountPath - type: object - type: array - timeoutForControlPlane: - description: TimeoutForControlPlane controls the timeout that we - use for API server to appear - type: string - type: object - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - certificatesDir: - description: CertificatesDir specifies where to store or look for all - required certificates. - type: string - clusterName: - description: The cluster name - type: string - controlPlaneEndpoint: - description: 'ControlPlaneEndpoint sets a stable IP address or DNS name - for the control plane; it can be a valid IP address or a RFC-1123 - DNS subdomain, both with optional TCP port. In case the ControlPlaneEndpoint - is not specified, the AdvertiseAddress + BindPort are used; in case - the ControlPlaneEndpoint is specified but without a TCP port, the - BindPort is used. Possible usages are: e.g. In a cluster with more - than one control plane instances, this field should be assigned the - address of the external load balancer in front of the control plane - instances. e.g. in environments with enforced node recycling, the - ControlPlaneEndpoint could be used for assigning a stable DNS to the - control plane.' 
- type: string - controllerManager: - description: ControllerManager contains extra settings for the controller - manager control plane component - properties: - extraArgs: - description: 'ExtraArgs is an extra set of flags to pass to the - control plane component. TODO: This is temporary and ideally we - would like to switch all components to use ComponentConfig + ConfigMaps.' - type: object - extraVolumes: - description: ExtraVolumes is an extra set of host volumes, mounted - to the control plane component. - items: - properties: - hostPath: - description: HostPath is the path in the host that will be - mounted inside the pod. - type: string - mountPath: - description: MountPath is the path inside the pod where hostPath - will be mounted. - type: string - name: - description: Name of the volume inside the pod template. - type: string - pathType: - description: PathType is the type of the HostPath. - type: string - readOnly: - description: ReadOnly controls write access to the volume - type: boolean - required: - - name - - hostPath - - mountPath - type: object - type: array - type: object - dns: - description: DNS defines the options for the DNS add-on installed in - the cluster. - properties: - imageRepository: - description: ImageRepository sets the container registry to pull - images from. if not set, the ImageRepository defined in ClusterConfiguration - will be used instead. - type: string - imageTag: - description: ImageTag allows to specify a tag for the image. In - case this value is set, kubeadm does not change automatically - the version of the above components during upgrades. - type: string - type: - description: Type defines the DNS add-on to be used - type: string - required: - - type - type: object - etcd: - description: Etcd holds configuration for etcd. - properties: - external: - description: External describes how to connect to an external etcd - cluster Local and External are mutually exclusive - properties: - caFile: - description: CAFile is an SSL Certificate Authority file used - to secure etcd communication. Required if using a TLS connection. - type: string - certFile: - description: CertFile is an SSL certification file used to secure - etcd communication. Required if using a TLS connection. - type: string - endpoints: - description: Endpoints of etcd members. Required for ExternalEtcd. - items: - type: string - type: array - keyFile: - description: KeyFile is an SSL key file used to secure etcd - communication. Required if using a TLS connection. - type: string - required: - - endpoints - - caFile - - certFile - - keyFile - type: object - local: - description: Local provides configuration knobs for configuring - the local etcd instance Local and External are mutually exclusive - properties: - dataDir: - description: DataDir is the directory etcd will place its data. - Defaults to "/var/lib/etcd". - type: string - extraArgs: - description: ExtraArgs are extra arguments provided to the etcd - binary when run inside a static pod. - type: object - imageRepository: - description: ImageRepository sets the container registry to - pull images from. if not set, the ImageRepository defined - in ClusterConfiguration will be used instead. - type: string - imageTag: - description: ImageTag allows to specify a tag for the image. - In case this value is set, kubeadm does not change automatically - the version of the above components during upgrades. - type: string - peerCertSANs: - description: PeerCertSANs sets extra Subject Alternative Names - for the etcd peer signing cert. 
- items: - type: string - type: array - serverCertSANs: - description: ServerCertSANs sets extra Subject Alternative Names - for the etcd server signing cert. - items: - type: string - type: array - required: - - dataDir - type: object - type: object - featureGates: - description: FeatureGates enabled by the user. - type: object - imageRepository: - description: ImageRepository sets the container registry to pull images - from. If empty, `k8s.gcr.io` will be used by default; in case of kubernetes - version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`) - `gcr.io/kubernetes-ci-images` will be used as a default for control - plane components and for kube-proxy, while `k8s.gcr.io` will be used - for all the other images. - type: string - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint the - client submits requests to. Cannot be updated. In CamelCase. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - kubernetesVersion: - description: KubernetesVersion is the target version of the control - plane. - type: string - networking: - description: Networking holds configuration for the networking topology - of the cluster. - properties: - dnsDomain: - description: DNSDomain is the dns domain used by k8s services. Defaults - to "cluster.local". - type: string - podSubnet: - description: PodSubnet is the subnet used by pods. - type: string - serviceSubnet: - description: ServiceSubnet is the subnet used by k8s services. Defaults - to "10.96.0.0/12". - type: string - required: - - serviceSubnet - - podSubnet - - dnsDomain - type: object - scheduler: - description: Scheduler contains extra settings for the scheduler control - plane component - properties: - extraArgs: - description: 'ExtraArgs is an extra set of flags to pass to the - control plane component. TODO: This is temporary and ideally we - would like to switch all components to use ComponentConfig + ConfigMaps.' - type: object - extraVolumes: - description: ExtraVolumes is an extra set of host volumes, mounted - to the control plane component. - items: - properties: - hostPath: - description: HostPath is the path in the host that will be - mounted inside the pod. - type: string - mountPath: - description: MountPath is the path inside the pod where hostPath - will be mounted. - type: string - name: - description: Name of the volume inside the pod template. - type: string - pathType: - description: PathType is the type of the HostPath. - type: string - readOnly: - description: ReadOnly controls write access to the volume - type: boolean - required: - - name - - hostPath - - mountPath - type: object - type: array - type: object - useHyperKubeImage: - description: UseHyperKubeImage controls if hyperkube should be used - for Kubernetes components instead of their respective separate images - type: boolean - required: - - etcd - - networking - - kubernetesVersion - - controlPlaneEndpoint - - dns - - certificatesDir - - imageRepository - type: object - disablePortSecurity: - description: DisablePortSecurity disables the port security of the network - created for the Kubernetes cluster, which also disables SecurityGroups - type: boolean - disableServerTags: - description: 'Default: True. In case of server tag errors, set to False' - type: boolean - dnsNameservers: - description: DNSNameservers is the list of nameservers for OpenStack Subnet - being created. 
- items: - type: string - type: array - etcdCAKeyPair: - description: EtcdCAKeyPair is the key pair for etcd. - properties: - cert: - description: base64 encoded cert and key - format: byte - type: string - key: - format: byte - type: string - type: object - externalNetworkId: - description: ExternalNetworkID is the ID of an external OpenStack Network. - This is necessary to get public internet to the VMs. - type: string - externalRouterIPs: - description: ExternalRouterIPs is an array of externalIPs on the respective - subnets. This is necessary if the router needs a fixed ip in a specific - subnet. - items: - properties: - fixedIP: - description: The FixedIP in the corresponding subnet - type: string - subnet: - description: The subnet in which the FixedIP is used for the Gateway - of this router - properties: - filter: - description: Filters for optional network query - properties: - cidr: - type: string - description: - type: string - enableDhcp: - type: boolean - id: - type: string - ipVersion: - format: int64 - type: integer - ipv6AddressMode: - type: string - ipv6RaMode: - type: string - limit: - format: int64 - type: integer - marker: - type: string - name: - type: string - networkId: - type: string - notTags: - type: string - notTagsAny: - type: string - projectId: - type: string - sortDir: - type: string - sortKey: - type: string - subnetpoolId: - type: string - tags: - type: string - tagsAny: - type: string - tenantId: - type: string - type: object - uuid: - description: The UUID of the network. Required if you omit the - port attribute. - type: string - type: object - required: - - subnet - type: object - type: array - frontProxyCAKeyPair: - description: FrontProxyCAKeyPair is the key pair for FrontProxyKeyPair. - properties: - cert: - description: base64 encoded cert and key - format: byte - type: string - key: - format: byte - type: string - type: object - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - managedAPIServerLoadBalancer: - description: 'ManagedAPIServerLoadBalancer defines whether a LoadBalancer - for the APIServer should be created. If set to true the following properties - are mandatory: APIServerLoadBalancerFloatingIP, APIServerLoadBalancerPort' - type: boolean - managedSecurityGroups: - description: ManagedSecurityGroups defines that kubernetes manages the OpenStack - security groups for now, that means that we'll create two security groups, - one allowing SSH and API access from everywhere, and another one that - allows all traffic to/from machines belonging to that group. In the future, - we could make this more flexible. - type: boolean - metadata: - type: object - nodeCidr: - description: NodeCIDR is the OpenStack Subnet to be created. Cluster actuator - will create a network, a subnet with NodeCIDR, and a router connected - to this subnet. If you leave this empty, no network will be created. - type: string - saKeyPair: - description: SAKeyPair is the service account key pair. 
- properties: - cert: - description: base64 encoded cert and key - format: byte - type: string - key: - format: byte - type: string - type: object - tags: - description: Tags for all resources in cluster - items: - type: string - type: array - useOctavia: - description: UseOctavia is weather LoadBalancer Service is Octavia or not - type: boolean - required: - - cloudsSecret - - cloudName - - useOctavia - - managedAPIServerLoadBalancer - - managedSecurityGroups - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderstatus.yaml b/config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderstatus.yaml deleted file mode 100644 index 3691fd533e..0000000000 --- a/config/crds/openstackproviderconfig_v1alpha1_openstackclusterproviderstatus.yaml +++ /dev/null @@ -1,187 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: openstackclusterproviderstatuses.openstackproviderconfig.k8s.io -spec: - group: openstackproviderconfig.k8s.io - names: - kind: OpenstackClusterProviderStatus - plural: openstackclusterproviderstatuses - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - controlPlaneSecurityGroup: - description: 'ControlPlaneSecurityGroups contains all the information about - the OpenStack Security Group that needs to be applied to control plane - nodes. TODO: Maybe instead of two properties, we add a property to the - group?' - properties: - id: - type: string - name: - type: string - rules: - items: - properties: - direction: - type: string - etherType: - type: string - name: - type: string - portRangeMax: - format: int64 - type: integer - portRangeMin: - format: int64 - type: integer - protocol: - type: string - remoteGroupID: - type: string - remoteIPPrefix: - type: string - securityGroupID: - type: string - required: - - name - - direction - - etherType - - securityGroupID - - portRangeMin - - portRangeMax - - protocol - - remoteGroupID - - remoteIPPrefix - type: object - type: array - required: - - name - - id - - rules - type: object - globalSecurityGroup: - description: GlobalSecurityGroup contains all the information about the - OpenStack Security Group that needs to be applied to all nodes, both control - plane and worker nodes. - properties: - id: - type: string - name: - type: string - rules: - items: - properties: - direction: - type: string - etherType: - type: string - name: - type: string - portRangeMax: - format: int64 - type: integer - portRangeMin: - format: int64 - type: integer - protocol: - type: string - remoteGroupID: - type: string - remoteIPPrefix: - type: string - securityGroupID: - type: string - required: - - name - - direction - - etherType - - securityGroupID - - portRangeMin - - portRangeMax - - protocol - - remoteGroupID - - remoteIPPrefix - type: object - type: array - required: - - name - - id - - rules - type: object - kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - network: - description: Network contains all information about the created OpenStack - Network. It includes Subnets and Router. - properties: - apiServerLoadBalancer: - description: Be careful when using APIServerLoadBalancer, because this - field is optional and therefore not set in all cases - properties: - id: - type: string - internalIP: - type: string - ip: - type: string - name: - type: string - required: - - name - - id - - ip - - internalIP - type: object - id: - type: string - name: - type: string - router: - properties: - id: - type: string - name: - type: string - required: - - name - - id - type: object - subnet: - properties: - cidr: - type: string - id: - type: string - name: - type: string - required: - - name - - id - - cidr - type: object - required: - - name - - id - type: object - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crds/openstackproviderconfig_v1alpha1_openstackmachineproviderstatus.yaml b/config/crds/openstackproviderconfig_v1alpha1_openstackmachineproviderstatus.yaml deleted file mode 100644 index fdf91a4ab3..0000000000 --- a/config/crds/openstackproviderconfig_v1alpha1_openstackmachineproviderstatus.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: openstackmachineproviderstatuses.openstackproviderconfig.k8s.io -spec: - group: openstackproviderconfig.k8s.io - names: - kind: OpenstackMachineProviderStatus - plural: openstackmachineproviderstatuses - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crds/openstackproviderconfig_v1alpha1_openstackproviderspec.yaml b/config/crds/openstackproviderconfig_v1alpha1_openstackproviderspec.yaml deleted file mode 100644 index 02ccbfc3f6..0000000000 --- a/config/crds/openstackproviderconfig_v1alpha1_openstackproviderspec.yaml +++ /dev/null @@ -1,529 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: openstackproviderspecs.openstackproviderconfig.k8s.io -spec: - group: openstackproviderconfig.k8s.io - names: - kind: OpenstackProviderSpec - plural: openstackproviderspecs - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - availabilityZone: - description: The availability zone from which to launch the server. - type: string - cloudName: - description: The name of the cloud to use from the clouds secret - type: string - cloudsSecret: - description: The name of the secret containing the openstack credentials - type: object - configDrive: - description: Config Drive support - type: boolean - flavor: - description: The flavor reference for the flavor for your server instance. - type: string - floatingIP: - description: The floatingIP which will be associated to the machine, only - used for master. The floatingIP should have been created and haven't been - associated. - type: string - image: - description: The name of the image to use for your server instance. If the - RootVolume is specified, this will be ignored and use rootVolume directly. - type: string - keyName: - description: The ssh key to inject in the instance - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - kubeadmConfiguration: - description: KubeadmConfiguration holds the kubeadm configuration options - properties: - init: - description: InitConfiguration is used to customize any kubeadm init - configuration parameters. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the - latest internal value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - bootstrapTokens: - description: BootstrapTokens is respected at `kubeadm init` time - and describes a set of Bootstrap Tokens to create. This information - IS NOT uploaded to the kubeadm cluster configmap, partly because - of its sensitive nature - items: - properties: - description: - description: Description sets a human-friendly message why - this token exists and what it's used for, so other administrators - can know its purpose. - type: string - expires: - description: Expires specifies the timestamp when this token - expires. Defaults to being set dynamically at runtime based - on the TTL. Expires and TTL are mutually exclusive. - format: date-time - type: string - groups: - description: Groups specifies the extra groups that this token - will authenticate as when/if used for authentication - items: - type: string - type: array - token: - description: Token is used for establishing bidirectional - trust between nodes and masters. Used for joining nodes - in the cluster. - type: object - ttl: - description: TTL defines the time to live for this token. - Defaults to 24h. Expires and TTL are mutually exclusive. - type: string - usages: - description: Usages describes the ways in which this token - can be used. Can by default be used for establishing bidirectional - trust, but that can be changed here. - items: - type: string - type: array - required: - - token - type: object - type: array - kind: - description: 'Kind is a string value representing the REST resource - this object represents. 
Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the API - server instance that's deployed on this control plane node In - HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint - in the sense that ControlPlaneEndpoint is the global endpoint - for the cluster, which then loadbalances the requests to each - individual API server. This configuration object lets you customize - what IP/DNS name and port the local API server advertises it's - accessible on. By default, kubeadm tries to auto-detect the IP - of the default interface and use that, but in case that process - fails you may set the desired value here. - properties: - advertiseAddress: - description: AdvertiseAddress sets the IP address for the API - server to advertise. - type: string - bindPort: - description: BindPort sets the secure port for the API Server - to bind to. Defaults to 6443. - format: int32 - type: integer - required: - - advertiseAddress - - bindPort - type: object - nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new master node to the cluster - properties: - criSocket: - description: CRISocket is used to retrieve container runtime - info. This information will be annotated to the Node API object, - for later re-use - type: string - kubeletExtraArgs: - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at runtime - for the kubelet to source. This overrides the generic base-level - configuration in the kubelet-config-1.X ConfigMap Flags have - higher priority when parsing. These values are local and specific - to the node kubeadm is executing on. - type: object - name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm joiń` operation. This field is also used in the CommonName - field of the kubelet's client certificate to the API server. - Defaults to the hostname of the node if not provided. - type: string - taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. nil, - in the `kubeadm init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your master node, set this field - to an empty slice, i.e. `taints: {}` in the YAML file. This - field is solely used for Node registration.' - items: - type: object - type: array - type: object - type: object - join: - description: JoinConfiguration is used to customize any kubeadm join - configuration parameters. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the - latest internal value, and may reject unrecognized values. More - info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - caCertPath: - description: CACertPath is the path to the SSL certificate authority - used to secure comunications between node and master. Defaults - to "/etc/kubernetes/pki/ca.crt". 
- type: string - controlPlane: - description: ControlPlane defines the additional control plane instance - to be deployed on the joining node. If nil, no additional control - plane instance will be deployed. - properties: - localAPIEndpoint: - description: LocalAPIEndpoint represents the endpoint of the - API server instance to be deployed on this node. - properties: - advertiseAddress: - description: AdvertiseAddress sets the IP address for the - API server to advertise. - type: string - bindPort: - description: BindPort sets the secure port for the API Server - to bind to. Defaults to 6443. - format: int32 - type: integer - required: - - advertiseAddress - - bindPort - type: object - type: object - discovery: - description: Discovery specifies the options for the kubelet to - use during the TLS Bootstrap process - properties: - bootstrapToken: - description: BootstrapToken is used to set the options for bootstrap - token based discovery BootstrapToken and File are mutually - exclusive - properties: - apiServerEndpoint: - description: APIServerEndpoint is an IP or domain name to - the API server from which info will be fetched. - type: string - caCertHashes: - description: 'CACertHashes specifies a set of public key - pins to verify when token-based discovery is used. The - root CA found during discovery must match one of these - values. Specifying an empty set disables root CA pinning, - which can be unsafe. Each hash is specified as ":", - where the only currently supported type is "sha256". This - is a hex-encoded SHA-256 hash of the Subject Public Key - Info (SPKI) object in DER-encoded ASN.1. These hashes - can be calculated using, for example, OpenSSL: openssl - x509 -pubkey -in ca.crt openssl rsa -pubin -outform der - 2>&/dev/null | openssl dgst -sha256 -hex' - items: - type: string - type: array - token: - description: Token is a token used to validate cluster information - fetched from the master. - type: string - unsafeSkipCAVerification: - description: UnsafeSkipCAVerification allows token-based - discovery without CA verification via CACertHashes. This - can weaken the security of kubeadm since other nodes can - impersonate the master. - type: boolean - required: - - token - - unsafeSkipCAVerification - type: object - file: - description: File is used to specify a file or URL to a kubeconfig - file from which to load cluster information BootstrapToken - and File are mutually exclusive - properties: - kubeConfigPath: - description: KubeConfigPath is used to specify the actual - file path or URL to the kubeconfig file from which to - load cluster information - type: string - required: - - kubeConfigPath - type: object - timeout: - description: Timeout modifies the discovery timeout - type: string - tlsBootstrapToken: - description: TLSBootstrapToken is a token used for TLS bootstrapping. - If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, - but can be overridden. If .File is set, this field **must - be set** in case the KubeConfigFile does not contain any other - authentication information - type: string - required: - - tlsBootstrapToken - type: object - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - nodeRegistration: - description: NodeRegistration holds fields that relate to registering - the new master node to the cluster - properties: - criSocket: - description: CRISocket is used to retrieve container runtime - info. This information will be annotated to the Node API object, - for later re-use - type: string - kubeletExtraArgs: - description: KubeletExtraArgs passes through extra arguments - to the kubelet. The arguments here are passed to the kubelet - command line via the environment file kubeadm writes at runtime - for the kubelet to source. This overrides the generic base-level - configuration in the kubelet-config-1.X ConfigMap Flags have - higher priority when parsing. These values are local and specific - to the node kubeadm is executing on. - type: object - name: - description: Name is the `.Metadata.Name` field of the Node - API object that will be created in this `kubeadm init` or - `kubeadm joiń` operation. This field is also used in the CommonName - field of the kubelet's client certificate to the API server. - Defaults to the hostname of the node if not provided. - type: string - taints: - description: 'Taints specifies the taints the Node API object - should be registered with. If this field is unset, i.e. nil, - in the `kubeadm init` process it will be defaulted to []v1.Taint{''node-role.kubernetes.io/master=""''}. - If you don''t want to taint your master node, set this field - to an empty slice, i.e. `taints: {}` in the YAML file. This - field is solely used for Node registration.' - items: - type: object - type: array - type: object - required: - - nodeRegistration - - caCertPath - - discovery - type: object - type: object - metadata: - type: object - networks: - description: A networks object. Required parameter when there are multiple - networks defined for the tenant. When you do not specify the networks - parameter, the server attaches to the only network created for the current - tenant. - items: - properties: - filter: - description: Filters for optional network query - properties: - adminStateUp: - type: boolean - description: - type: string - id: - type: string - limit: - format: int64 - type: integer - marker: - type: string - name: - type: string - notTags: - type: string - notTagsAny: - type: string - projectId: - type: string - shared: - type: boolean - sortDir: - type: string - sortKey: - type: string - status: - type: string - tags: - type: string - tagsAny: - type: string - tenantId: - type: string - type: object - fixedIp: - description: A fixed IPv4 address for the NIC. - type: string - subnets: - description: Subnet within a network to use - items: - properties: - filter: - description: Filters for optional network query - properties: - cidr: - type: string - description: - type: string - enableDhcp: - type: boolean - id: - type: string - ipVersion: - format: int64 - type: integer - ipv6AddressMode: - type: string - ipv6RaMode: - type: string - limit: - format: int64 - type: integer - marker: - type: string - name: - type: string - networkId: - type: string - notTags: - type: string - notTagsAny: - type: string - projectId: - type: string - sortDir: - type: string - sortKey: - type: string - subnetpoolId: - type: string - tags: - type: string - tagsAny: - type: string - tenantId: - type: string - type: object - uuid: - description: The UUID of the network. Required if you omit the - port attribute. 
- type: string - type: object - type: array - uuid: - description: The UUID of the network. Required if you omit the port - attribute. - type: string - type: object - type: array - rootVolume: - description: The volume metadata to boot from - properties: - deviceType: - type: string - diskSize: - format: int64 - type: integer - sourceType: - type: string - sourceUUID: - type: string - required: - - deviceType - type: object - securityGroups: - description: The names of the security groups to assign to the instance - items: - properties: - filter: - description: Filters used to query security groups in openstack - properties: - description: - type: string - id: - type: string - limit: - format: int64 - type: integer - marker: - type: string - name: - type: string - notTags: - type: string - notTagsAny: - type: string - projectId: - type: string - sortDir: - type: string - sortKey: - type: string - tags: - type: string - tagsAny: - type: string - tenantId: - type: string - type: object - name: - description: Security Group name - type: string - uuid: - description: Security Group UID - type: string - type: object - type: array - serverMetadata: - description: Metadata mapping. Allows you to create a map of key value pairs - to add to the server instance. - type: object - tags: - description: Machine tags Requires Nova api 2.52 minimum! - items: - type: string - type: array - trunk: - description: Whether the server instance is created on a trunk port or not. - type: boolean - userDataSecret: - description: The name of the secret containing the user data (startup script - in most cases) - type: object - required: - - cloudsSecret - - cloudName - - flavor - - image - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 0000000000..1ce04d5209 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,73 @@ +# Adds namespace to all resources. +namespace: capo-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: capo- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +bases: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager + +patchesStrategicMerge: +- manager_image_patch.yaml + # Protect the /metrics endpoint by putting it behind auth. + # Only one of manager_auth_proxy_patch.yaml and + # manager_prometheus_metrics_patch.yaml should be enabled. +- manager_auth_proxy_patch.yaml + # If you want your controller-manager to expose the /metrics + # endpoint w/o any authn/z, uncomment the following line and + # comment manager_auth_proxy_patch.yaml. + # Only one of manager_auth_proxy_patch.yaml and + # manager_prometheus_metrics_patch.yaml should be enabled. 
+#- manager_prometheus_metrics_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: certmanager.k8s.io +# version: v1alpha1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: certmanager.k8s.io +# version: v1alpha1 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml new file mode 100644 index 0000000000..989d698878 --- /dev/null +++ b/config/default/manager_auth_proxy_patch.yaml @@ -0,0 +1,25 @@ +# This patch inject a sidecar container which is a HTTP proxy for the controller manager, +# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=10" + ports: + - containerPort: 8443 + name: https + - name: manager + args: + - "--metrics-addr=127.0.0.1:8080" + - "--enable-leader-election" diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml new file mode 100644 index 0000000000..9796f74b1e --- /dev/null +++ b/config/default/manager_image_patch.yaml @@ -0,0 +1,12 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + # Change the value of image field below to your controller image URL + - image: docker.io/sbueringer/cluster-api-provider-openstack:dev + name: manager diff --git a/config/default/manager_prometheus_metrics_patch.yaml b/config/default/manager_prometheus_metrics_patch.yaml new file mode 100644 index 0000000000..0b96c6813e --- /dev/null +++ b/config/default/manager_prometheus_metrics_patch.yaml @@ -0,0 +1,19 @@ +# This patch enables Prometheus scraping for the manager pod. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + metadata: + annotations: + prometheus.io/scrape: 'true' + spec: + containers: + # Expose the prometheus metrics on default port + - name: manager + ports: + - containerPort: 8080 + name: metrics + protocol: TCP diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000000..f2f7157b46 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 0000000000..5e3e077df9 --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,15 @@ +# This patch add annotation to admission webhook config and +# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/config/kustomization.yaml b/config/kustomization.yaml deleted file mode 100644 index 8a2152b579..0000000000 --- a/config/kustomization.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -generatorOptions: - disableNameSuffixHash: true - -resources: -- rbac/secrets_role_binding.yaml -- rbac/rbac_role_binding.yaml -- rbac/secrets_role.yaml -- rbac/rbac_role.yaml -- manager/namespace.yaml -- manager/service.yaml -- manager/statefulset.yaml -- crds/openstackproviderconfig_v1alpha1_openstackclusterproviderstatus.yaml -- crds/openstackproviderconfig_v1alpha1_openstackclusterproviderspec.yaml -- crds/openstackproviderconfig_v1alpha1_openstackproviderspec.yaml -- crds/openstackproviderconfig_v1alpha1_openstackmachineproviderstatus.yaml diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 0000000000..5c5f0b84cb --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 0000000000..a750a4407d --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: capo-controller-manager + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: capo-controller-manager +spec: + selector: + matchLabels: + control-plane: capo-controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: capo-controller-manager + spec: + containers: + - name: manager + image: 
controller:latest + imagePullPolicy: Always + volumeMounts: + - name: cloud-config + mountPath: /etc/openstack + env: + - name: OS_CLOUD + valueFrom: + secretKeyRef: + name: cloud-selector + key: OS_CLOUD + volumes: + - name: cloud-config + secret: + secretName: cloud-config + terminationGracePeriodSeconds: 10 diff --git a/config/manager/service.yaml b/config/manager/service.yaml deleted file mode 100644 index 2487477682..0000000000 --- a/config/manager/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" - name: openstack-provider-controller-manager-service - namespace: openstack-provider-system -spec: - ports: - - port: 443 - selector: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" diff --git a/config/manager/statefulset.yaml b/config/manager/statefulset.yaml deleted file mode 100644 index 1ebbf9705f..0000000000 --- a/config/manager/statefulset.yaml +++ /dev/null @@ -1,67 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: clusterapi-controllers - namespace: openstack-provider-system - labels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" - serviceName: openstack-provider-controller-manager-service - template: - metadata: - labels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" - spec: - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - containers: - - name: openstack-machine-controller - image: k8scloudprovider/openstack-cluster-api-controller:latest - volumeMounts: - - name: config - mountPath: /etc/kubernetes - - name: cloud-config - mountPath: /etc/openstack - - name: kubeadm - mountPath: /usr/bin/kubeadm - resources: - requests: - cpu: 100m - memory: 20Mi - limits: - cpu: 100m - memory: 30Mi - env: - - name: USER - value: root - - name: OS_CLOUD - valueFrom: - secretKeyRef: - name: cloud-selector - key: OS_CLOUD - volumes: - - name: config - hostPath: - path: /etc/kubernetes - - name: cloud-config - secret: - secretName: cloud-config - - name: kubeadm - hostPath: - path: /usr/bin/kubeadm diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml new file mode 100644 index 0000000000..618f5e4177 --- /dev/null +++ b/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] diff --git a/config/rbac/rbac_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml similarity index 55% rename from config/rbac/rbac_role_binding.yaml rename to config/rbac/auth_proxy_role_binding.yaml index 4a35ee480e..48ed1e4b85 100644 --- a/config/rbac/rbac_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -1,13 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - creationTimestamp: null - name: openstack-provider-manager-rolebinding + name: proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: 
openstack-provider-manager-role + name: proxy-role subjects: - kind: ServiceAccount name: default - namespace: openstack-provider-system + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml new file mode 100644 index 0000000000..d61e5469fb --- /dev/null +++ b/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "8443" + prometheus.io/scheme: https + prometheus.io/scrape: "true" + labels: + control-plane: controller-manager + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + targetPort: https + selector: + control-plane: controller-manager diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..817f1fe613 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 3 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000000..eaa79158fb --- /dev/null +++ b/config/rbac/leader_election_role.yaml @@ -0,0 +1,32 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create diff --git a/config/rbac/secrets_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml similarity index 52% rename from config/rbac/secrets_role_binding.yaml rename to config/rbac/leader_election_role_binding.yaml index b97acef830..eed16906f4 100644 --- a/config/rbac/secrets_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -1,13 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: openstack-provider-manager-secrets-binding - namespace: kube-system + name: leader-election-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: openstack-provider-manager-secrets + name: leader-election-role subjects: - kind: ServiceAccount name: default - namespace: openstack-provider-system + namespace: system diff --git a/vendor/sigs.k8s.io/cluster-api/config/rbac/manager_role.yaml b/config/rbac/role.yaml similarity index 54% rename from vendor/sigs.k8s.io/cluster-api/config/rbac/manager_role.yaml rename to config/rbac/role.yaml index 317c6f54dc..e68b0a20e3 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/rbac/manager_role.yaml +++ b/config/rbac/role.yaml @@ -1,3 +1,5 @@ + +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -5,107 +7,70 @@ metadata: name: manager-role rules: - apiGroups: - - cluster.k8s.io + - "" resources: - - clusters + - secrets verbs: + - create - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - - "" + - cluster.x-k8s.io resources: - - events + - clusters verbs: - get - list - watch - - create - - patch - apiGroups: - - 
cluster.k8s.io + - cluster.x-k8s.io resources: + - clusters - machines - - machines/status verbs: - get - list - watch - - create - - update - - patch - - delete - apiGroups: - - cluster.k8s.io + - infrastructure.cluster.x-k8s.io resources: - - machinedeployments - - machinedeployments/status + - openstackclusters verbs: - - get - - list - - watch - create - - update - - patch - delete -- apiGroups: - - cluster.k8s.io - resources: - - machinesets - - machinesets/status - verbs: - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - - cluster.k8s.io + - infrastructure.cluster.x-k8s.io resources: - - machines + - openstackclusters/status verbs: - get - - list - - watch - - create - - update - patch - - delete + - update - apiGroups: - - "" + - infrastructure.cluster.x-k8s.io resources: - - nodes + - openstackmachines verbs: - - get - - list - - watch - create - - update - - patch - delete -- apiGroups: - - cluster.k8s.io - resources: - - machines - verbs: - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - - "" + - infrastructure.cluster.x-k8s.io resources: - - secrets + - openstackmachines/status verbs: - get - - list - - watch + - patch + - update diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..8f2658702c --- /dev/null +++ b/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: default + namespace: system diff --git a/config/rbac/secrets_role.yaml b/config/rbac/secrets_role.yaml deleted file mode 100644 index 55b41db9e2..0000000000 --- a/config/rbac/secrets_role.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: openstack-provider-manager-secrets - namespace: kube-system -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch diff --git a/config/samples/infrastructure_v1alpha2_openstackcluster.yaml b/config/samples/infrastructure_v1alpha2_openstackcluster.yaml new file mode 100644 index 0000000000..602a7e5739 --- /dev/null +++ b/config/samples/infrastructure_v1alpha2_openstackcluster.yaml @@ -0,0 +1,7 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: OpenStackCluster +metadata: + name: openstackcluster-sample +spec: + # Add fields here + foo: bar diff --git a/config/samples/infrastructure_v1alpha2_openstackmachine.yaml b/config/samples/infrastructure_v1alpha2_openstackmachine.yaml new file mode 100644 index 0000000000..617f7a40eb --- /dev/null +++ b/config/samples/infrastructure_v1alpha2_openstackmachine.yaml @@ -0,0 +1,7 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: OpenStackMachine +metadata: + name: openstackmachine-sample +spec: + # Add fields here + foo: bar diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 0000000000..9cf26134e4 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000000..25e21e3c96 --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following 
config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 0000000000..b4861025ab --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,12 @@ + +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: 443 + selector: + control-plane: controller-manager diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go new file mode 100644 index 0000000000..dce888c7c3 --- /dev/null +++ b/controllers/openstackcluster_controller.go @@ -0,0 +1,276 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/loadbalancer" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/networking" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/provider" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/userdata" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/util" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "strconv" + "strings" + "time" +) + +const ( + clusterControllerName = "openstackcluster-controller" +) + +// OpenStackClusterReconciler reconciles a OpenStackCluster object +type OpenStackClusterReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch + +func (r *OpenStackClusterReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) { + ctx := context.TODO() + logger := log.Log.WithName(clusterControllerName). + WithName(fmt.Sprintf("namespace=%s", request.Namespace)). + WithName(fmt.Sprintf("openStackCluster=%s", request.Name)) + + // Fetch the OpenStackCluster instance + openStackCluster := &infrav1.OpenStackCluster{} + err := r.Get(ctx, request.NamespacedName, openStackCluster) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + logger = logger.WithName(openStackCluster.APIVersion) + + // Fetch the Cluster. 
+ cluster, err := util.GetOwnerCluster(ctx, r.Client, openStackCluster.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if cluster == nil { + logger.Info("Waiting for Cluster Controller to set OwnerRef on OpenStackCluster") + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + + logger = logger.WithName(fmt.Sprintf("cluster=%s", cluster.Name)) + + // Handle deleted clusters + if !openStackCluster.DeletionTimestamp.IsZero() { + return r.reconcileClusterDelete(logger, cluster, openStackCluster) + } + + // Handle non-deleted clusters + return r.reconcileCluster(logger, cluster, openStackCluster) +} + +func (r *OpenStackClusterReconciler) reconcileCluster(logger logr.Logger, cluster *v1alpha2.Cluster, openStackCluster *infrav1.OpenStackCluster) (_ ctrl.Result, reterr error) { + klog.Infof("Reconciling Cluster %s/%s", cluster.Namespace, cluster.Name) + + // openstackClusterPatch is used for patch generation during storeCluster + openstackClusterPatch := client.MergeFrom(openStackCluster.DeepCopy()) + clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) + + osProviderClient, clientOpts, err := provider.NewClientFromCluster(r.Client, openStackCluster) + if err != nil { + return reconcile.Result{}, err + } + + networkingService, err := networking.NewService(osProviderClient, clientOpts) + if err != nil { + return reconcile.Result{}, err + } + + certificatesService := certificates.NewService() + + loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, openStackCluster.Spec.UseOctavia) + if err != nil { + return reconcile.Result{}, err + } + + defer func() { + if err := storeCluster(r.Client, openStackCluster, openstackClusterPatch); err != nil && reterr == nil { + reterr = err + } + }() + + klog.Infof("Reconciling certificates for cluster %s", clusterName) + // Store cert material in spec.
+ if err := certificatesService.ReconcileCertificates(clusterName, openStackCluster); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile certificates for cluster %q", cluster.Name) + } + + klog.Infof("Reconciling network components for cluster %s", clusterName) + if openStackCluster.Spec.NodeCIDR == "" { + klog.V(4).Infof("No need to reconcile network for cluster %s", clusterName) + } else { + err := networkingService.ReconcileNetwork(clusterName, openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to reconcile network: %v", err) + } + err = networkingService.ReconcileSubnet(clusterName, openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to reconcile subnets: %v", err) + } + err = networkingService.ReconcileRouter(clusterName, openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to reconcile router: %v", err) + } + if openStackCluster.Spec.ManagedAPIServerLoadBalancer { + err = loadbalancerService.ReconcileLoadBalancer(clusterName, openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to reconcile load balancer: %v", err) + } + } + } + + err = networkingService.ReconcileSecurityGroups(clusterName, openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to reconcile security groups: %v", err) + } + + // Set APIEndpoints so the Cluster API Cluster Controller can pull them + controlPlaneURI := strings.Split(openStackCluster.Spec.ClusterConfiguration.ControlPlaneEndpoint, ":") + apiServerHost := controlPlaneURI[0] + apiServerPortStr := controlPlaneURI[1] + apiServerPort, err := strconv.Atoi(apiServerPortStr) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "could not parse port of controlPlaneEndpoint %s", openStackCluster.Spec.ClusterConfiguration.ControlPlaneEndpoint) + } + openStackCluster.Status.APIEndpoints = []infrav1.APIEndpoint{ + { + Host: apiServerHost, + Port: apiServerPort, + }, + } + + // No errors, so mark us ready so the Cluster API Cluster Controller can pull it + openStackCluster.Status.Ready = true + + // TODO remove after migration to kubeadm bootstrapper + // Upload kubeconfig (just for us) can be deleted after we migrated to kubeadm bootstrapper + // because kubeadm bootstrapper already creates the secret "{cluster.Name}-kubeconfig" + kubeConfig, err := userdata.GetKubeConfig(openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to get kubeconfig for cluster %q", cluster.Name) + } + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-tmp-kubeconfig", cluster.Name), + Namespace: cluster.Namespace, + }, + StringData: map[string]string{ + "value": kubeConfig, + }, + } + err = r.Client.Create(context.TODO(), secret) + if err != nil { + if apierrors.IsAlreadyExists(err) { + if err = r.Client.Update(context.TODO(), secret); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to update kubeconfig secret for cluster %q", cluster.Name) + } + } else { + return reconcile.Result{}, errors.Wrapf(err, "failed to create kubeconfig secret for cluster %q", cluster.Name) + } + } + + klog.Infof("Reconciled Cluster %s/%s successfully", cluster.Namespace, cluster.Name) + return reconcile.Result{}, nil +} + +func (r *OpenStackClusterReconciler) reconcileClusterDelete(logger logr.Logger, cluster *v1alpha2.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { + + klog.Infof("Reconcile 
Cluster delete %s/%s", cluster.Namespace, cluster.Name) + clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) + osProviderClient, clientOpts, err := provider.NewClientFromCluster(r.Client, openStackCluster) + if err != nil { + return reconcile.Result{}, err + } + + networkingService, err := networking.NewService(osProviderClient, clientOpts) + if err != nil { + return reconcile.Result{}, err + } + + loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, openStackCluster.Spec.UseOctavia) + if err != nil { + return reconcile.Result{}, err + } + + if openStackCluster.Spec.ManagedAPIServerLoadBalancer { + err = loadbalancerService.DeleteLoadBalancer(clusterName, openStackCluster) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to delete load balancer: %v", err) + } + } + + // Delete other things + if openStackCluster.Status.GlobalSecurityGroup != nil { + klog.Infof("Deleting global security group %q", openStackCluster.Status.GlobalSecurityGroup.Name) + err := networkingService.DeleteSecurityGroups(openStackCluster.Status.GlobalSecurityGroup) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to delete security group: %v", err) + } + } + + if openStackCluster.Status.ControlPlaneSecurityGroup != nil { + klog.Infof("Deleting control plane security group %q", openStackCluster.Status.ControlPlaneSecurityGroup.Name) + err := networkingService.DeleteSecurityGroups(openStackCluster.Status.ControlPlaneSecurityGroup) + if err != nil { + return reconcile.Result{}, errors.Errorf("failed to delete security group: %v", err) + } + } + return reconcile.Result{}, nil +} + +func storeCluster(ctrlClient client.Client, openStackCluster *infrav1.OpenStackCluster, openStackClusterPatch client.Patch) error { + ctx := context.TODO() + + // Patch Cluster object. + if err := ctrlClient.Patch(ctx, openStackCluster, openStackClusterPatch); err != nil { + return errors.Wrapf(err, "error patching OpenStackCluster %s/%s", openStackCluster.Namespace, openStackCluster.Name) + } + + // Patch Cluster status. + if err := ctrlClient.Status().Patch(ctx, openStackCluster, openStackClusterPatch); err != nil { + return errors.Wrapf(err, "error patching OpenStackCluster %s/%s status", openStackCluster.Namespace, openStackCluster.Name) + } + + return nil +} + +func (r *OpenStackClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1.OpenStackCluster{}). + Complete(r) +} diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go new file mode 100644 index 0000000000..9d6294c573 --- /dev/null +++ b/controllers/openstackmachine_controller.go @@ -0,0 +1,422 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/utils/openstack/clientconfig" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" + "k8s.io/utils/pointer" + "net" + "os" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/compute" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/loadbalancer" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/networking" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/provider" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/userdata" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "strconv" + "time" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" + "sigs.k8s.io/cluster-api/pkg/util" +) + +const ( + waitForClusterInfrastructureReadyDuration = 15 * time.Second + machineControllerName = "openstackmachine-controller" + TimeoutInstanceCreate = 5 + TimeoutInstanceDelete = 5 + RetryIntervalInstanceStatus = 10 * time.Second +) + +// OpenStackMachineReconciler reconciles a OpenStackMachine object +type OpenStackMachineReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackmachines,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackmachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;machines,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch + +func (r *OpenStackMachineReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) { + ctx := context.TODO() + logger := log.Log. + WithName(machineControllerName). + WithName(fmt.Sprintf("namespace=%s", request.Namespace)). + WithName(fmt.Sprintf("openStackMachine=%s", request.Name)) + + // Fetch the OpenStackMachine instance. + openStackMachine := &infrav1.OpenStackMachine{} + err := r.Get(ctx, request.NamespacedName, openStackMachine) + if err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + logger = logger.WithName(openStackMachine.APIVersion) + + // Fetch the Machine. + machine, err := util.GetOwnerMachine(ctx, r.Client, openStackMachine.ObjectMeta) + if err != nil { + return reconcile.Result{}, err + } + if machine == nil { + logger.Info("Waiting for Machine Controller to set OwnerRef on OpenStackMachine") + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + + logger = logger.WithName(fmt.Sprintf("machine=%s", machine.Name)) + + // Fetch the Cluster. 
+ cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) + if err != nil { + logger.Info("Machine is missing cluster label or cluster does not exist") + return reconcile.Result{}, nil + } + + logger = logger.WithName(fmt.Sprintf("cluster=%s", cluster.Name)) + + openStackCluster := &infrav1.OpenStackCluster{} + openStackClusterName := types.NamespacedName{ + Namespace: openStackMachine.Namespace, + Name: cluster.Spec.InfrastructureRef.Name, + } + if err := r.Client.Get(ctx, openStackClusterName, openStackCluster); err != nil { + logger.Info("Waiting for OpenStackCluster") + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + + logger = logger.WithName(fmt.Sprintf("openStackCluster=%s", openStackCluster.Name)) + + // Handle deleted machines + if !openStackMachine.DeletionTimestamp.IsZero() { + return r.reconcileMachineDelete(logger, machine, openStackMachine, cluster, openStackCluster) + } + + // Handle non-deleted machines + return r.reconcileMachine(logger, machine, openStackMachine, cluster, openStackCluster) +} + +func (r *OpenStackMachineReconciler) reconcileMachine(logger logr.Logger, machine *v1alpha2.Machine, openStackMachine *infrav1.OpenStackMachine, cluster *v1alpha2.Cluster, openStackCluster *infrav1.OpenStackCluster) (_ ctrl.Result, reterr error) { + // If the OpenStackMachine is in an error state, return early. + if openStackMachine.Status.ErrorReason != nil || openStackMachine.Status.ErrorMessage != nil { + logger.Info("Error state detected, skipping reconciliation") + return reconcile.Result{}, nil + } + + // If the OpenStackMachine doesn't have our finalizer, add it. + if !util.Contains(openStackMachine.Finalizers, infrav1.MachineFinalizer) { + openStackMachine.Finalizers = append(openStackMachine.Finalizers, infrav1.MachineFinalizer) + } + + if !cluster.Status.InfrastructureReady { + logger.Info("Cluster infrastructure is not ready yet, requeuing machine") + return reconcile.Result{RequeueAfter: waitForClusterInfrastructureReadyDuration}, nil + } + + // TODO enable when using kubeadm bootstrapper + // Make sure bootstrap data is available and populated. + //if machine.Spec.Bootstrap.Data == nil { + // logger.Info("Waiting for bootstrap data to be available") + // return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + //} + + klog.Infof("Creating Machine %s/%s: %s", cluster.Namespace, cluster.Name, machine.Name) + + clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name) + + openstackMachinePatch := client.MergeFrom(openStackMachine.DeepCopy()) + + osProviderClient, clientOpts, err := provider.NewClientFromMachine(r.Client, openStackMachine) + if err != nil { + return reconcile.Result{}, err + } + + computeService, err := compute.NewService(osProviderClient, clientOpts) + if err != nil { + return reconcile.Result{}, err + } + + networkingService, err := networking.NewService(osProviderClient, clientOpts) + if err != nil { + return reconcile.Result{}, err + } + + defer func() { + if err := storeMachine(r.Client, openStackMachine, openstackMachinePatch); err != nil && reterr == nil { + reterr = err + } + }() + + instance, err := r.getOrCreate(computeService, machine, openStackMachine, cluster, openStackCluster) + if err != nil { + handleMachineError(openStackMachine, capierrors.UpdateMachineError, errors.Errorf("OpenStack instance cannot be created: %v", err)) + return reconcile.Result{}, err + } + + // Set an error message if we couldn't find the instance.
+ if instance == nil { + handleMachineError(openStackMachine, capierrors.UpdateMachineError, errors.New("OpenStack instance cannot be found")) + return reconcile.Result{}, nil + } + + // TODO(sbueringer) From CAPA: TODO(ncdc): move this validation logic into a validating webhook (for us: create validation logic in webhook) + + openStackMachine.Spec.ProviderID = pointer.StringPtr(fmt.Sprintf("openstack:////%s", instance.ID)) + + openStackMachine.Status.InstanceState = &instance.State + + // TODO(sbueringer) From CAPA: TODO(vincepri): Remove this annotation when clusterctl is no longer relevant. + if openStackMachine.Annotations == nil { + openStackMachine.Annotations = map[string]string{} + } + openStackMachine.Annotations["cluster-api-provider-openstack"] = "true" + + switch instance.State { + case infrav1.InstanceStateActive: + logger.Info("Machine instance is ACTIVE", "instance-id", instance.ID) + case infrav1.InstanceStateBuilding: + logger.Info("Machine instance is BUILDING", "instance-id", instance.ID) + default: + handleMachineError(openStackMachine, capierrors.UpdateMachineError, errors.Errorf("OpenStack instance state %q is unexpected", instance.State)) + return reconcile.Result{}, nil + } + + if openStackMachine.Spec.FloatingIP != "" { + err = r.reconcileFloatingIP(computeService, networkingService, instance, openStackMachine, openStackCluster) + if err != nil { + handleMachineError(openStackMachine, capierrors.UpdateMachineError, errors.Errorf("FloatingIP cannot be reconciled: %v", err)) + return reconcile.Result{}, nil + } + } + + if openStackCluster.Spec.ManagedAPIServerLoadBalancer { + err = r.reconcileLoadBalancerMember(osProviderClient, clientOpts, instance, clusterName, machine, openStackMachine, openStackCluster) + if err != nil { + handleMachineError(openStackMachine, capierrors.UpdateMachineError, errors.Errorf("LoadBalancerMember cannot be reconciled: %v", err)) + return reconcile.Result{}, nil + } + } + + // TODO(sbueringer) check if that's the right place to set the machine to ready + openStackMachine.Status.Ready = true + + klog.Infof("Created Machine %s/%s: %s successfully", cluster.Namespace, cluster.Name, machine.Name) + return reconcile.Result{}, nil +} + +func (r *OpenStackMachineReconciler) reconcileMachineDelete(logger logr.Logger, machine *v1alpha2.Machine, openStackMachine *infrav1.OpenStackMachine, cluster *v1alpha2.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { + + klog.Infof("Deleting Machine %s/%s: %s", cluster.Namespace, cluster.Name, machine.Name) + + clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name) + + osProviderClient, clientOpts, err := provider.NewClientFromMachine(r.Client, openStackMachine) + if err != nil { + return reconcile.Result{}, err + } + + computeService, err := compute.NewService(osProviderClient, clientOpts) + if err != nil { + return reconcile.Result{}, err + } + + loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, openStackCluster.Spec.UseOctavia) + if err != nil { + return reconcile.Result{}, err + } + if openStackCluster.Spec.ManagedAPIServerLoadBalancer { + err = loadbalancerService.DeleteLoadBalancerMember(clusterName, machine, openStackMachine, openStackCluster) + if err != nil { + return reconcile.Result{}, err + } + } + + instance, err := computeService.InstanceExists(openStackMachine) + if err != nil { + return reconcile.Result{}, err + } + + if instance == nil { + klog.Infof("Skipped deleting %s that is already deleted.\n", 
machine.Name)
+		return reconcile.Result{}, nil
+	}
+
+	err = computeService.InstanceDelete(machine)
+	if err != nil {
+		handleMachineError(openStackMachine, capierrors.UpdateMachineError, errors.Errorf("error deleting OpenStack instance: %v", err))
+		return reconcile.Result{}, nil
+	}
+	return reconcile.Result{}, nil
+}
+
+func (r *OpenStackMachineReconciler) getOrCreate(computeService *compute.Service, machine *v1alpha2.Machine, openStackMachine *infrav1.OpenStackMachine, cluster *v1alpha2.Cluster, openStackCluster *infrav1.OpenStackCluster) (*compute.Instance, error) {
+
+	instance, err := computeService.InstanceExists(openStackMachine)
+	if err != nil {
+		return nil, err
+	}
+
+	if instance == nil {
+		userData, err := userdata.GetUserData(r.Client, machine, openStackMachine, cluster, openStackCluster)
+		if err != nil {
+			return nil, err
+		}
+
+		instance, err = computeService.InstanceCreate(cluster.Name, machine.Name, openStackCluster, openStackMachine, userData)
+		if err != nil {
+			return nil, errors.Errorf("error creating OpenStack instance: %v", err)
+		}
+		instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", TimeoutInstanceCreate)
+		instanceCreateTimeout = instanceCreateTimeout * time.Minute
+		// instance in PollImmediate has to overwrite the instance of the outer scope to get an updated instance state,
+		// which is then returned at the end of getOrCreate
+		err = util.PollImmediate(RetryIntervalInstanceStatus, instanceCreateTimeout, func() (bool, error) {
+			instance, err = computeService.GetInstance(instance.ID)
+			if err != nil {
+				return false, nil
+			}
+			return instance.Status == "ACTIVE", nil
+		})
+		if err != nil {
+			return nil, errors.Errorf("error waiting for OpenStack instance to become ACTIVE: %v", err)
+		}
+	}
+
+	return instance, nil
+}
+
+func handleMachineError(openstackMachine *infrav1.OpenStackMachine, reason capierrors.MachineStatusError, message error) {
+	openstackMachine.Status.ErrorReason = &reason
+	openstackMachine.Status.ErrorMessage = pointer.StringPtr(message.Error())
+	// TODO remove if this error is logged redundantly
+	klog.Errorf("Machine error %s: %v", openstackMachine.Name, message.Error())
+}
+
+func getTimeout(name string, timeout int) time.Duration {
+	if v := os.Getenv(name); v != "" {
+		timeout, err := strconv.Atoi(v)
+		if err == nil {
+			return time.Duration(timeout)
+		}
+	}
+	return time.Duration(timeout)
+}
+
+func storeMachine(ctrlClient client.Client, openStackMachine *infrav1.OpenStackMachine, openStackMachinePatch client.Patch) error {
+	ctx := context.TODO()
+
+	// Patch the OpenStackMachine object.
+	if err := ctrlClient.Patch(ctx, openStackMachine, openStackMachinePatch); err != nil {
+		return errors.Wrapf(err, "error patching OpenStackMachine %s/%s", openStackMachine.Namespace, openStackMachine.Name)
+	}
+
+	// Patch the OpenStackMachine status.
+	if err := ctrlClient.Status().Patch(ctx, openStackMachine, openStackMachinePatch); err != nil {
+		return errors.Wrapf(err, "error patching OpenStackMachine %s/%s status", openStackMachine.Namespace, openStackMachine.Name)
+	}
+
+	return nil
+}
+
+func (r *OpenStackMachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&infrav1.OpenStackMachine{}).
+		Complete(r)
+}
+
+func (r *OpenStackMachineReconciler) reconcileFloatingIP(computeService *compute.Service, networkingService *networking.Service, instance *compute.Instance, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) error {
+	err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackMachine.Spec.FloatingIP)
+	if err != nil {
+		return fmt.Errorf("error creating floatingIP: %v", err)
+	}
+
+	err = computeService.AssociateFloatingIP(instance.ID, openStackMachine.Spec.FloatingIP)
+	if err != nil {
+		return fmt.Errorf("error associating floatingIP: %v", err)
+	}
+	return nil
+}
+
+func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(osProviderClient *gophercloud.ProviderClient, clientOpts *clientconfig.ClientOpts, instance *compute.Instance, clusterName string, machine *v1alpha2.Machine, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) error {
+	ip, err := getIPFromInstance(instance)
+	if err != nil {
+		return err
+	}
+	loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, openStackCluster.Spec.UseOctavia)
+	if err != nil {
+		return err
+	}
+
+	if err := loadbalancerService.ReconcileLoadBalancerMember(clusterName, machine, openStackMachine, openStackCluster, ip); err != nil {
+		return err
+	}
+	return nil
+}
+
+func getIPFromInstance(instance *compute.Instance) (string, error) {
+	if instance.AccessIPv4 != "" && net.ParseIP(instance.AccessIPv4) != nil {
+		return instance.AccessIPv4, nil
+	}
+	type networkInterface struct {
+		Address string  `json:"addr"`
+		Version float64 `json:"version"`
+		Type    string  `json:"OS-EXT-IPS:type"`
+	}
+	var addrList []string
+
+	for _, b := range instance.Addresses {
+		list, err := json.Marshal(b)
+		if err != nil {
+			return "", fmt.Errorf("error extracting IP from instance: %v", err)
+		}
+		var networks []interface{}
+		json.Unmarshal(list, &networks)
+		for _, network := range networks {
+			var netInterface networkInterface
+			b, _ := json.Marshal(network)
+			json.Unmarshal(b, &netInterface)
+			if netInterface.Version == 4.0 {
+				if netInterface.Type == "floating" {
+					return netInterface.Address, nil
+				}
+				addrList = append(addrList, netInterface.Address)
+			}
+		}
+	}
+	if len(addrList) != 0 {
+		return addrList[0], nil
+	}
+	return "", fmt.Errorf("error extracting IP from instance: no address found")
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
new file mode 100644
index 0000000000..9d960184ec
--- /dev/null
+++ b/controllers/suite_test.go
@@ -0,0 +1,82 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"path/filepath"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. 
"github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + infrastructurev1alpha2 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{envtest.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = infrastructurev1alpha2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = infrastructurev1alpha2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/go.mod b/go.mod index 4a2bb171a9..0529a1e8fe 100644 --- a/go.mod +++ b/go.mod @@ -6,27 +6,37 @@ require ( github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd // indirect github.com/coreos/container-linux-config-transpiler v0.9.0 + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/coreos/ignition v0.33.0 // indirect + github.com/go-logr/logr v0.1.0 github.com/gophercloud/gophercloud v0.3.0 github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 + github.com/onsi/ginkgo v1.8.0 + github.com/onsi/gomega v1.5.0 github.com/pkg/errors v0.8.1 github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb // indirect + go4.org v0.0.0-20190313082347-94abd6928b1d // indirect gopkg.in/yaml.v2 v2.2.2 - k8s.io/api v0.0.0-20190814101207-0772a1bdf941 - k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6 - k8s.io/client-go v10.0.0+incompatible - k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87 - k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b + k8s.io/api v0.0.0-20190711103429-37c3b8b1ca65 + k8s.io/apimachinery v0.0.0-20190711103026-7bf792636534 + k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible + k8s.io/cluster-bootstrap v0.0.0-20190711112844-b7409fb13d1b + k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 + k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9 // indirect k8s.io/klog v0.4.0 - k8s.io/kubernetes v1.13.4 + k8s.io/kubernetes v1.14.2 + k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 sigs.k8s.io/cluster-api v0.1.9 - sigs.k8s.io/controller-runtime v0.1.12 - sigs.k8s.io/controller-tools v0.1.12 - sigs.k8s.io/testing_frameworks v0.1.1 - sigs.k8s.io/yaml v1.1.0 + sigs.k8s.io/controller-runtime 
v0.2.0-rc.0 + sigs.k8s.io/controller-tools v0.2.0-rc.0 + sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 ) replace ( - k8s.io/api => k8s.io/api v0.0.0-20190222213804-5cb15d344471 - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 + // Workaround for https://github.com/kubernetes/test-infra/pull/13918#issuecomment-522807578 + gomodules.xyz/jsonpatch/v2 => gomodules.xyz/jsonpatch/v2 v2.0.0-20190626003512-87910169748d + k8s.io/api => k8s.io/api v0.0.0-20190704095032-f4ca3d3bdf1d + k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190704094733-8f6ac2502e51 + sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v0.0.0-20190813192342-65800b3b20e8 ) diff --git a/go.sum b/go.sum index 6e93bf03dc..b7408a86ab 100644 --- a/go.sum +++ b/go.sum @@ -1,50 +1,42 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.35.1 h1:LMe/Btq0Eijsc97JyBwMc0KMXOe0orqAMdg7/EkywN8= -cloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg= -contrib.go.opencensus.io/exporter/ocagent v0.2.0 h1:Q/jXnVbliDYozuWJni9452xsSUuo+y8yrioxRgofBhE= -contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/go-autorest v11.5.0+incompatible h1:zp9GQJhEX+EBqEYC2MEGQ+gjKFEPRAWtfwcmstS2hGk= -github.com/Azure/go-autorest v11.5.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest v0.2.0 h1:zBtSTOQTtjzHVRe+mhkiHvHwRTKHhjBEyo1m6DfI3So= +github.com/Azure/go-autorest/autorest v0.2.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest/adal v0.1.0 h1:RSw/7EAullliqwkZvgIGDYZWQm1PGKXI8c4aY/87yuU= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 
+github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd h1:NlKlOv3aVJ5ODMC0JWPvddw05KENkL3cZttIuu8kJRo= github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd/go.mod h1:idhzw68Q7v4j+rQ2AGyq3OlZW2Jij9mdmGA4/Sk6J0E= -github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.1.0 h1:VwZ9smxzX8u14/125wHIX7ARV+YhR+L4JADswwxWK0Y= -github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/coreos/container-linux-config-transpiler v0.9.0 
h1:UBGpT8qWqzi48hNLrzMAgAUNJsR0LW8Gk5/dR/caI8U= github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/ignition v0.33.0 h1:rYJoGv5v/5rCJAzyMaE9gU8pn7w7pv0M4rDzHvDK6T4= github.com/coreos/ignition v0.33.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -52,629 +44,286 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify 
v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= -github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= -github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= -github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= -github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= -github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= -github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= -github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= -github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= -github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= -github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= -github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= -github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= -github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= -github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= -github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= -github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= -github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= -github.com/gobuffalo/envy v1.6.12 h1:zkhss8DXz/pty2HAyA8BnvWMTYxo4gjd4+WCnYovoxY= -github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2Kk8PTP0= -github.com/gobuffalo/events v1.0.3/go.mod 
h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= -github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= -github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= -github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= -github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= -github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= -github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= -github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= -github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= -github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= -github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= -github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= -github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2 h1:51NF9n6h4nGMooU5ALB+uJCM0UOmcWjAegIZb/ePtoE= -github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= -github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= -github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= -github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= -github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= -github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= -github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= -github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod 
h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= -github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= -github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= -github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= -github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= -github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= -github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= -github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= -github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= -github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= -github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= -github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= -github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= -github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= -github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= -github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= -github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= -github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= -github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= -github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= -github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= -github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= -github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= -github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= -github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= -github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.1/go.mod 
h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= -github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= -github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= -github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= -github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= -github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= -github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= -github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= -github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= -github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= -github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= -github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= -github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= -github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= -github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= -github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= -github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= -github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= -github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= -github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= -github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= -github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= -github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod 
h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= -github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= -github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= -github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= -github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= -github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= -github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= -github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= -github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= -github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= -github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= -github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= -github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= -github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= -github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= -github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= -github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags 
v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= -github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= -github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= -github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-github v17.0.0+incompatible/go.mod 
h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 h1:QQqgDN0yxFHrhPV1BWFGP6hEZ82s18CGzu/bLQKT8RY= github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= -github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= -github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= -github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= -github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= -github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= -github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= -github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= -github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.0/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.1.0 h1:IxU7wGikQPAcoOd3/f4Ol7+vIKS1Sgu08tzjktR4nJE= -github.com/prometheus/common v0.1.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= 
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= -github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb h1:lyL3z7vYwTWXf4/bI+A01+cCSnfhKIBhy+SQ46Z/ml8= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= -go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5 h1:+hE86LblG4AyDgwMCLTE6FOlM9+qjHSYS+rKqxUVdsM= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +go4.org v0.0.0-20190313082347-94abd6928b1d h1:JkRdGP3zvTtTbabWSAC6n67ka30y7gOzWAah4XYJSfw= +go4.org 
v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c h1:pcBdqVcrlT+A3i+tWsOROFONQyey9tisIQHI4xqVGLg= -golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b h1:QhDb4isMLF+1hiAgAqj1YPRPQh+eAqwfG6wJzp57Iuo= -golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= +golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +gomodules.xyz/jsonpatch/v2 v2.0.0-20190626003512-87910169748d h1:0090FZwbU/rzM98x9+E8+2QoGbnHLmiaIki0y880lTU= +gomodules.xyz/jsonpatch/v2 v2.0.0-20190626003512-87910169748d/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 h1:SdCO7As+ChE1iV3IjBleIIWlj8VjZWuYEUF5pjELOOQ= -google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= -google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= -k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 h1:JfFtjaElBIgYKCWEtYQkcNrTpW+lMO4GJy8NP6SVQmM= -k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= -k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= -k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34= -k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87 h1:ClhfUBE7Q3tl31AdmaxSUP/o6UeYO9K3i4vWGiQPPG0= 
-k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87/go.mod h1:CD9HJ5mZ7v2e4GaFDhtg+bLIFqkVrnIX7xYkZOS6Q6I= -k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b h1:KH0fUlgdFZH8UMxJ/FDCYHpczfSQKefetq5NjL6BVF0= -k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d h1:rnHKkkKa8rR4DkemPeIzuL+/Ks2zZj6d3OV/HYD0t/E= -k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d/go.mod h1:lD66vYoxfIYwZTZunUYSg/e6fEj0fcgked2esAmsIfA= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190704095032-f4ca3d3bdf1d h1:X3GqeHwOBOJa0O7jrPobw6MGLMwthSnA/sM86ENzbCo= +k8s.io/api v0.0.0-20190704095032-f4ca3d3bdf1d/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190704094733-8f6ac2502e51 h1:Nuz3yWD9v9dVvDiVSBIah+B+pxkxQxtuBAx2712t7tQ= +k8s.io/apimachinery v0.0.0-20190704094733-8f6ac2502e51/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/cluster-bootstrap v0.0.0-20190711112844-b7409fb13d1b h1:jjPHxAEL11SvxuFoklFRsdiyrvStBqdQZCsmnscZh4Q= +k8s.io/cluster-bootstrap v0.0.0-20190711112844-b7409fb13d1b/go.mod h1:or0n37dMGRT/sKozvlkpaeObeTROaqZ1QRWBXy9vIVA= +k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 h1:lgPp615xLHxN84RBd+viA/oHzJfI0miFYFH4T9wpPQ4= +k8s.io/code-generator v0.0.0-20190311093542-50b561225d70/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= +k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7 h1:f+AySqWvoqyCD7aArN3EZ+g2boKIS52pcSo6zdZkc+4= +k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7/go.mod h1:DMaomcf3j3MM2j1FsvlLVVlc7wA2jPytEur3cP9zRxQ= +k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9 h1:/gJp8cw8+k4AxBNGZ5u5eRoCOzH3WVpY02K654HNiyU= +k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.2/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kubernetes v1.13.4 h1:gQqFv/pH8hlbznLXQUsi8s5zqYnv0slmUDl/yVA0EWc= -k8s.io/kubernetes v1.13.4/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils 
v0.0.0-20190607212802-c55fbcfc754a h1:2jUDc9gJja832Ftp+QbDV0tVhQHMISFn01els+2ZAcw= -k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/cluster-api v0.1.9 h1:pSkEW8Ws46jtHM1KUsFFhf0phY36hljxd5O75EXy9wI= -sigs.k8s.io/cluster-api v0.1.9/go.mod h1:MTxMmx3MSFZNi0RAjj64X+RurP9EEdhrRwLzdFZKTYw= -sigs.k8s.io/controller-runtime v0.1.12 h1:ovDq28E64PeY1yR+6H7DthakIC09soiDCrKvfP2tPYo= -sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= -sigs.k8s.io/controller-tools v0.1.11 h1:1ln8Ipmg2xosbeHmnlzG53kwO1f3yf9l6auvCHdbZpI= -sigs.k8s.io/controller-tools v0.1.11/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= -sigs.k8s.io/controller-tools v0.1.12 h1:LW8Tfywz+epjYiySSOYWFQl1O1y0os+ZWf22XJmsFww= -sigs.k8s.io/controller-tools v0.1.12/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kubernetes v1.14.2 h1:Gdq2hPpttbaJBoClIanCE6WSu4IZReA54yhkZtvPUOo= +k8s.io/kubernetes v1.14.2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/cluster-api v0.0.0-20190813192342-65800b3b20e8 h1:eL1qZBKN+aQzl3Zhhnzl0JplS+EAtFuJoRalS1hQ9LI= +sigs.k8s.io/cluster-api v0.0.0-20190813192342-65800b3b20e8/go.mod h1:dRPbLOP8J7nde5WRB2opPY3JW+0OJ/v/wBuOPRNRieI= +sigs.k8s.io/controller-runtime v0.2.0-beta.5/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= +sigs.k8s.io/controller-runtime v0.2.0-rc.0 h1:49JLOielmXfrd44Cmk2c0eeIkQ/Vq4AvfqsZqya16/E= +sigs.k8s.io/controller-runtime v0.2.0-rc.0/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= +sigs.k8s.io/controller-tools v0.2.0-beta.5/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= +sigs.k8s.io/controller-tools v0.2.0-rc.0 h1:8FZR8qgxNPPBCb6Q/WwoRUfYqWvgn1Fz6m5uKcCbXfI= +sigs.k8s.io/controller-tools v0.2.0-rc.0/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 h1:GtDhkj3cF4A4IW+A9LScsuxvJqA9DE7G7PGH1f8B07U= +sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate/boilerplate.generatego.txt similarity index 100% rename from hack/boilerplate.go.txt rename to hack/boilerplate/boilerplate.generatego.txt diff --git a/hack/tools.go b/hack/tools.go index a8b2f67e35..c8fb0d3280 100644 --- a/hack/tools.go +++ b/hack/tools.go @@ -18,14 +18,14 @@ limitations under the License. 
package tools import ( - _ "k8s.io/code-generator/cmd/client-gen" //nolint - _ "k8s.io/code-generator/cmd/conversion-gen" //nolint - _ "k8s.io/code-generator/cmd/deepcopy-gen" //nolint - _ "k8s.io/code-generator/cmd/defaulter-gen" //nolint - _ "k8s.io/code-generator/cmd/informer-gen" //nolint - _ "k8s.io/code-generator/cmd/lister-gen" //nolint - _ "k8s.io/code-generator/cmd/register-gen" //nolint - _ "k8s.io/code-generator/cmd/set-gen" //nolint - _ "sigs.k8s.io/controller-tools/cmd/controller-gen" //nolint - _ "sigs.k8s.io/testing_frameworks/integration" //nolint + _ "k8s.io/code-generator/cmd/client-gen" + _ "k8s.io/code-generator/cmd/conversion-gen" + _ "k8s.io/code-generator/cmd/deepcopy-gen" + _ "k8s.io/code-generator/cmd/defaulter-gen" + _ "k8s.io/code-generator/cmd/informer-gen" + _ "k8s.io/code-generator/cmd/lister-gen" + _ "k8s.io/code-generator/cmd/register-gen" + _ "k8s.io/code-generator/cmd/set-gen" + _ "sigs.k8s.io/controller-tools/cmd/controller-gen" + _ "sigs.k8s.io/testing_frameworks/integration" ) diff --git a/hack/update-vendor.sh b/hack/update-vendor.sh index 5e2fb0e358..e0cb10a4a1 100755 --- a/hack/update-vendor.sh +++ b/hack/update-vendor.sh @@ -19,17 +19,21 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +# shellcheck source=ensure-go.sh source "${KUBE_ROOT}/hack/ensure-go.sh" +GOPROXY=$(go env GOPROXY) +export GOPROXY="${GOPROXY:-https://proxy.golang.org}" go mod tidy go mod vendor # Copy full dependencies if needed. -for dep in $(cat ${KUBE_ROOT}/go.vendor); do - src=$(go mod download -json ${dep} | jq -r .Dir) +while IFS= read -r dep; do + src="$(go mod download -json "${dep}" | jq -r .Dir)" dst="${KUBE_ROOT}/vendor/${dep}" - cp -af "${src}"/* "${dst}" - chmod -R +w ${dst} -done + rm -rf "${dst}" + cp -af "${src}" "${dst}" + chmod -R +w "${dst}" +done < "${KUBE_ROOT}/go.vendor" go mod verify diff --git a/main.go b/main.go new file mode 100644 index 0000000000..5e41afedc7 --- /dev/null +++ b/main.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "flag" + "net/http" + "os" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/record" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "time" + + _ "net/http/pprof" + + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog" + "k8s.io/klog/klogr" + infrastructurev1alpha2 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" + "sigs.k8s.io/cluster-api-provider-openstack/controllers" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + ctrl "sigs.k8s.io/controller-runtime" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + _ = clientgoscheme.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + _ = infrastructurev1alpha2.AddToScheme(scheme) + // +kubebuilder:scaffold:scheme +} + +func main() { + klog.InitFlags(nil) + + var metricsAddr string + var enableLeaderElection bool + + flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + watchNamespace := flag.String("namespace", "", + "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.") + profilerAddress := flag.String("profiler-address", "", "Bind address to expose the pprof profiler (e.g. localhost:6060)") + flag.Parse() + + if *watchNamespace != "" { + setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", *watchNamespace) + } + + if *profilerAddress != "" { + setupLog.Info("Profiler listening for requests", "profiler-address", *profilerAddress) + go func() { + setupLog.Error(http.ListenAndServe(*profilerAddress, nil), "listen and serve error") + }() + } + + syncPeriod := 10 * time.Minute + + ctrl.SetLogger(klogr.New()) + + cfg, err := config.GetConfigWithContext(os.Getenv("KUBECONTEXT")) + if err != nil { + setupLog.Error(err, "unable to get kubeconfig") + } + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + LeaderElection: enableLeaderElection, + SyncPeriod: &syncPeriod, + Namespace: *watchNamespace, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // Initialize event recorder. 
+ record.InitFromRecorder(mgr.GetEventRecorderFor("openstack-controller")) + + if err = (&controllers.OpenStackMachineReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("OpenStackMachine"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OpenStackMachine") + os.Exit(1) + } + if err = (&controllers.OpenStackClusterReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("OpenStackCluster"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OpenStackCluster") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/overlays-config/coreos/kustomization.yaml b/overlays-config/coreos/kustomization.yaml deleted file mode 100644 index 490ceb77e7..0000000000 --- a/overlays-config/coreos/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bases: - - ../../config/ -patchesStrategicMerge: - - manager/deployment.yaml diff --git a/overlays-config/coreos/manager/deployment.yaml b/overlays-config/coreos/manager/deployment.yaml deleted file mode 100644 index 45baeaf050..0000000000 --- a/overlays-config/coreos/manager/deployment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: clusterapi-controllers - namespace: openstack-provider-system - labels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" -spec: - selector: - matchLabels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" - template: - spec: - volumes: - - name: kubeadm - hostPath: - path: /opt/bin/kubeadm diff --git a/overlays-config/generic/kustomization.yaml b/overlays-config/generic/kustomization.yaml deleted file mode 100644 index fe4960087f..0000000000 --- a/overlays-config/generic/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -bases: - - ../../config -patchesStrategicMerge: - - manager/deployment.yaml diff --git a/overlays-config/generic/manager/deployment.yaml b/overlays-config/generic/manager/deployment.yaml deleted file mode 100644 index 207d3f10c2..0000000000 --- a/overlays-config/generic/manager/deployment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: clusterapi-controllers - namespace: openstack-provider-system - labels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" -spec: - selector: - matchLabels: - control-plane: controller-manager - controller-tools.k8s.io: "1.0" - template: - spec: - volumes: - - name: kubeadm - hostPath: - path: /usr/bin/kubeadm diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go deleted file mode 100644 index 99c9b2b6a7..0000000000 --- a/pkg/apis/apis.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Generate deepcopy for apis -//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate.go.txt - -// Package apis contains Kubernetes API groups. -package apis - -import ( - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" -) - -// AddToSchemes may be used to add all resources defined in the project to a Scheme -var AddToSchemes runtime.SchemeBuilder - -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) -} - -// AddToScheme adds all Resources to the Scheme -func AddToScheme(s *runtime.Scheme) error { - return AddToSchemes.AddToScheme(s) -} diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/register.go b/pkg/apis/openstackproviderconfig/v1alpha1/register.go deleted file mode 100644 index 44798a9664..0000000000 --- a/pkg/apis/openstackproviderconfig/v1alpha1/register.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/json" - "sigs.k8s.io/yaml" - - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" -) - -const GroupName = "openstackproviderconfig" - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "openstackproviderconfig.k8s.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) - -func ClusterSpecAndStatusFromProviderSpec(cluster *clusterv1.Cluster) (*OpenstackClusterProviderSpec, *OpenstackClusterProviderStatus, error) { - clusterProviderSpec, err := ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) - if err != nil { - return nil, nil, errors.Errorf("failed to load cluster provider spec: %v", err) - } - clusterProviderStatus, err := ClusterStatusFromProviderStatus(cluster.Status.ProviderStatus) - if err != nil { - return nil, nil, errors.Errorf("failed to load cluster provider status: %v", err) - } - return clusterProviderSpec, clusterProviderStatus, nil -} - -// ClusterConfigFromProviderSpec unmarshals a provider config into an OpenStack Cluster type -func ClusterSpecFromProviderSpec(providerSpec clusterv1.ProviderSpec) (*OpenstackClusterProviderSpec, error) { - if providerSpec.Value == nil { - return nil, errors.New("no such providerSpec found in manifest") - } - - var config OpenstackClusterProviderSpec - if err := yaml.Unmarshal(providerSpec.Value.Raw, &config); err != nil { - return nil, err - } - return &config, nil -} - -// ClusterStatusFromProviderStatus unmarshals a provider status 
into an OpenStack Cluster Status type -func ClusterStatusFromProviderStatus(extension *runtime.RawExtension) (*OpenstackClusterProviderStatus, error) { - if extension == nil { - return &OpenstackClusterProviderStatus{}, nil - } - - status := new(OpenstackClusterProviderStatus) - if err := yaml.Unmarshal(extension.Raw, status); err != nil { - return nil, err - } - - return status, nil -} - -// This is the same as ClusterSpecFromProviderSpec but we -// expect there to be a specific Spec type for Machines soon -func MachineSpecFromProviderSpec(providerSpec clusterv1.ProviderSpec) (*OpenstackProviderSpec, error) { - if providerSpec.Value == nil { - return nil, errors.New("no such providerSpec found in manifest") - } - - var config OpenstackProviderSpec - if err := yaml.Unmarshal(providerSpec.Value.Raw, &config); err != nil { - return nil, err - } - return &config, nil -} - -func EncodeClusterSpecAndStatus(cluster *clusterv1.Cluster, clusterProviderSpec *OpenstackClusterProviderSpec, clusterProviderStatus *OpenstackClusterProviderStatus) (*runtime.RawExtension, *runtime.RawExtension, error) { - rawSpec, err := EncodeClusterSpec(clusterProviderSpec) - if err != nil { - return nil, nil, fmt.Errorf("failed to encod cluster spec for cluster %q in namespace %q: %v", cluster.Name, cluster.Namespace, err) - } - rawStatus, err := EncodeClusterStatus(clusterProviderStatus) - if err != nil { - return nil, nil, fmt.Errorf("failed to update cluster status for cluster %q in namespace %q: %v", cluster.Name, cluster.Namespace, err) - } - return rawSpec, rawStatus, nil -} - -func EncodeClusterSpec(spec *OpenstackClusterProviderSpec) (*runtime.RawExtension, error) { - if spec == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - - // TODO: use apimachinery conversion https://godoc.org/k8s.io/apimachinery/pkg/runtime#Convert_runtime_Object_To_runtime_RawExtension - if rawBytes, err = json.Marshal(spec); err != nil { - return nil, err - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -func EncodeClusterStatus(status *OpenstackClusterProviderStatus) (*runtime.RawExtension, error) { - if status == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - - // TODO: use apimachinery conversion https://godoc.org/k8s.io/apimachinery/pkg/runtime#Convert_runtime_Object_To_runtime_RawExtension - if rawBytes, err = json.Marshal(status); err != nil { - return nil, err - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -func EncodeMachineStatus(status *OpenstackMachineProviderStatus) (*runtime.RawExtension, error) { - if status == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - - // TODO: use apimachinery conversion https://godoc.org/k8s.io/apimachinery/pkg/runtime#Convert_runtime_Object_To_runtime_RawExtension - if rawBytes, err = json.Marshal(status); err != nil { - return nil, err - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go b/pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go deleted file mode 100644 index 3318c660c0..0000000000 --- a/pkg/apis/openstackproviderconfig/v1alpha1/securitygroup_types.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// SecurityGroup represents the basic information of the associated -// OpenStack Neutron Security Group. -type SecurityGroup struct { - Name string `json:"name"` - ID string `json:"id"` - Rules []SecurityGroupRule `json:"rules"` -} - -// SecurityGroupRule represent the basic information of the associated OpenStack -// Security Group Role. -type SecurityGroupRule struct { - ID string `json:"name"` - Direction string `json:"direction"` - EtherType string `json:"etherType"` - SecurityGroupID string `json:"securityGroupID"` - PortRangeMin int `json:"portRangeMin"` - PortRangeMax int `json:"portRangeMax"` - Protocol string `json:"protocol"` - RemoteGroupID string `json:"remoteGroupID"` - RemoteIPPrefix string `json:"remoteIPPrefix"` -} - -// Equal checks if two SecurityGroupRules are the same. -func (r SecurityGroupRule) Equal(x SecurityGroupRule) bool { - return (r.Direction == x.Direction && - r.EtherType == x.EtherType && - r.PortRangeMin == x.PortRangeMin && - r.PortRangeMax == x.PortRangeMax && - r.Protocol == x.Protocol && - r.RemoteGroupID == x.RemoteGroupID && - r.RemoteIPPrefix == x.RemoteIPPrefix) - -} diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/types.go b/pkg/apis/openstackproviderconfig/v1alpha1/types.go deleted file mode 100644 index 7ff385019f..0000000000 --- a/pkg/apis/openstackproviderconfig/v1alpha1/types.go +++ /dev/null @@ -1,371 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field -// for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. -// TODO(cglaubitz): We might consider to change this to OpenstackMachineProviderSpec -// +k8s:openapi-gen=true -type OpenstackProviderSpec struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // The name of the secret containing the openstack credentials - CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` - - // The name of the cloud to use from the clouds secret - CloudName string `json:"cloudName"` - - // The flavor reference for the flavor for your server instance. - Flavor string `json:"flavor"` - - // The name of the image to use for your server instance. 
- // If the RootVolume is specified, this will be ignored and use rootVolume directly. - Image string `json:"image"` - - // The ssh key to inject in the instance - KeyName string `json:"keyName,omitempty"` - - // A networks object. Required parameter when there are multiple networks defined for the tenant. - // When you do not specify the networks parameter, the server attaches to the only network created for the current tenant. - Networks []NetworkParam `json:"networks,omitempty"` - // The floatingIP which will be associated to the machine, only used for master. - // The floatingIP should have been created and haven't been associated. - FloatingIP string `json:"floatingIP,omitempty"` - - // The availability zone from which to launch the server. - AvailabilityZone string `json:"availabilityZone,omitempty"` - - // The names of the security groups to assign to the instance - SecurityGroups []SecurityGroupParam `json:"securityGroups,omitempty"` - - // The name of the secret containing the user data (startup script in most cases) - UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` - - // Whether the server instance is created on a trunk port or not. - Trunk bool `json:"trunk,omitempty"` - - // Machine tags - // Requires Nova api 2.52 minimum! - Tags []string `json:"tags,omitempty"` - - // Metadata mapping. Allows you to create a map of key value pairs to add to the server instance. - ServerMetadata map[string]string `json:"serverMetadata,omitempty"` - - // Config Drive support - ConfigDrive *bool `json:"configDrive,omitempty"` - - // The volume metadata to boot from - RootVolume *RootVolume `json:"rootVolume,omitempty"` - - // KubeadmConfiguration holds the kubeadm configuration options - // +optional - KubeadmConfiguration KubeadmConfiguration `json:"kubeadmConfiguration,omitempty"` -} - -// KubeadmConfiguration holds the various configurations that kubeadm uses -type KubeadmConfiguration struct { - // JoinConfiguration is used to customize any kubeadm join configuration - // parameters. - Join kubeadmv1beta1.JoinConfiguration `json:"join,omitempty"` - - // InitConfiguration is used to customize any kubeadm init configuration - // parameters. - Init kubeadmv1beta1.InitConfiguration `json:"init,omitempty"` -} - -type SecurityGroupParam struct { - // Security Group UID - UUID string `json:"uuid,omitempty"` - // Security Group name - Name string `json:"name,omitempty"` - // Filters used to query security groups in openstack - Filter SecurityGroupFilter `json:"filter,omitempty"` -} - -type SecurityGroupFilter struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - TenantID string `json:"tenantId,omitempty"` - ProjectID string `json:"projectId,omitempty"` - Limit int `json:"limit,omitempty"` - Marker string `json:"marker,omitempty"` - SortKey string `json:"sortKey,omitempty"` - SortDir string `json:"sortDir,omitempty"` - Tags string `json:"tags,omitempty"` - TagsAny string `json:"tagsAny,omitempty"` - NotTags string `json:"notTags,omitempty"` - NotTagsAny string `json:"notTagsAny,omitempty"` -} - -type NetworkParam struct { - // The UUID of the network. Required if you omit the port attribute. - UUID string `json:"uuid,omitempty"` - // A fixed IPv4 address for the NIC. 
- FixedIp string `json:"fixedIp,omitempty"` - // Filters for optional network query - Filter Filter `json:"filter,omitempty"` - // Subnet within a network to use - Subnets []SubnetParam `json:"subnets,omitempty"` -} - -type Filter struct { - Status string `json:"status,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - AdminStateUp *bool `json:"adminStateUp,omitempty"` - TenantID string `json:"tenantId,omitempty"` - ProjectID string `json:"projectId,omitempty"` - Shared *bool `json:"shared,omitempty"` - ID string `json:"id,omitempty"` - Marker string `json:"marker,omitempty"` - Limit int `json:"limit,omitempty"` - SortKey string `json:"sortKey,omitempty"` - SortDir string `json:"sortDir,omitempty"` - Tags string `json:"tags,omitempty"` - TagsAny string `json:"tagsAny,omitempty"` - NotTags string `json:"notTags,omitempty"` - NotTagsAny string `json:"notTagsAny,omitempty"` -} - -type SubnetParam struct { - // The UUID of the network. Required if you omit the port attribute. - UUID string `json:"uuid,omitempty"` - - // Filters for optional network query - Filter SubnetFilter `json:"filter,omitempty"` -} - -type SubnetFilter struct { - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - EnableDHCP *bool `json:"enableDhcp,omitempty"` - NetworkID string `json:"networkId,omitempty"` - TenantID string `json:"tenantId,omitempty"` - ProjectID string `json:"projectId,omitempty"` - IPVersion int `json:"ipVersion,omitempty"` - GatewayIP string `json:"gateway_ip,omitempty"` - CIDR string `json:"cidr,omitempty"` - IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` - IPv6RAMode string `json:"ipv6RaMode,omitempty"` - ID string `json:"id,omitempty"` - SubnetPoolID string `json:"subnetpoolId,omitempty"` - Limit int `json:"limit,omitempty"` - Marker string `json:"marker,omitempty"` - SortKey string `json:"sortKey,omitempty"` - SortDir string `json:"sortDir,omitempty"` - Tags string `json:"tags,omitempty"` - TagsAny string `json:"tagsAny,omitempty"` - NotTags string `json:"notTags,omitempty"` - NotTagsAny string `json:"notTagsAny,omitempty"` -} - -type RootVolume struct { - SourceType string `json:"sourceType,omitempty"` - SourceUUID string `json:"sourceUUID,omitempty"` - DeviceType string `json:"deviceType"` - Size int `json:"diskSize,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenstackClusterProviderSpec is the providerSpec for OpenStack in the cluster object -// +k8s:openapi-gen=true -type OpenstackClusterProviderSpec struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // The name of the secret containing the openstack credentials - CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` - - // The name of the cloud to use from the clouds secret - CloudName string `json:"cloudName"` - - // NodeCIDR is the OpenStack Subnet to be created. Cluster actuator will create a - // network, a subnet with NodeCIDR, and a router connected to this subnet. - // If you leave this empty, no network will be created. - NodeCIDR string `json:"nodeCidr,omitempty"` - // DNSNameservers is the list of nameservers for OpenStack Subnet being created. - DNSNameservers []string `json:"dnsNameservers,omitempty"` - // ExternalRouterIPs is an array of externalIPs on the respective subnets. - // This is necessary if the router needs a fixed ip in a specific subnet. 
- ExternalRouterIPs []ExternalRouterIPParam `json:"externalRouterIPs,omitempty"` - // ExternalNetworkID is the ID of an external OpenStack Network. This is necessary - // to get public internet to the VMs. - ExternalNetworkID string `json:"externalNetworkId,omitempty"` - // UseOctavia is weather LoadBalancer Service is Octavia or not - UseOctavia bool `json:"useOctavia"` - - // ManagedAPIServerLoadBalancer defines whether a LoadBalancer for the - // APIServer should be created. If set to true the following properties are - // mandatory: APIServerLoadBalancerFloatingIP, APIServerLoadBalancerPort - ManagedAPIServerLoadBalancer bool `json:"managedAPIServerLoadBalancer"` - - // APIServerLoadBalancerFloatingIP is the floatingIP which will be associated - // to the APIServer loadbalancer. The floatingIP will be created if it not - // already exists. - APIServerLoadBalancerFloatingIP string `json:"apiServerLoadBalancerFloatingIP,omitempty"` - - // APIServerLoadBalancerPort is the port on which the listener on the APIServer - // loadbalancer will be created - APIServerLoadBalancerPort int `json:"apiServerLoadBalancerPort,omitempty"` - - // APIServerLoadBalancerAdditionalPorts adds additional ports to the APIServerLoadBalancer - APIServerLoadBalancerAdditionalPorts []int `json:"apiServerLoadBalancerAdditionalPorts,omitempty"` - - // ManagedSecurityGroups defines that kubernetes manages the OpenStack security groups - // for now, that means that we'll create two security groups, one allowing SSH - // and API access from everywhere, and another one that allows all traffic to/from - // machines belonging to that group. In the future, we could make this more flexible. - ManagedSecurityGroups bool `json:"managedSecurityGroups"` - - // DisablePortSecurity disables the port security of the network created for the - // Kubernetes cluster, which also disables SecurityGroups - DisablePortSecurity bool `json:"disablePortSecurity,omitempty"` - - // Tags for all resources in cluster - Tags []string `json:"tags,omitempty"` - - // Default: True. In case of server tag errors, set to False - DisableServerTags bool `json:"disableServerTags,omitempty"` - - // CAKeyPair is the key pair for ca certs. - CAKeyPair KeyPair `json:"caKeyPair,omitempty"` - - //EtcdCAKeyPair is the key pair for etcd. - EtcdCAKeyPair KeyPair `json:"etcdCAKeyPair,omitempty"` - - // FrontProxyCAKeyPair is the key pair for FrontProxyKeyPair. - FrontProxyCAKeyPair KeyPair `json:"frontProxyCAKeyPair,omitempty"` - - // SAKeyPair is the service account key pair. - SAKeyPair KeyPair `json:"saKeyPair,omitempty"` - - // ClusterConfiguration holds the cluster-wide information used during a - // kubeadm init call. - ClusterConfiguration kubeadmv1beta1.ClusterConfiguration `json:"clusterConfiguration,omitempty"` -} - -// KeyPair is how operators can supply custom keypairs for kubeadm to use. -type KeyPair struct { - // base64 encoded cert and key - Cert []byte `json:"cert,omitempty"` - Key []byte `json:"key,omitempty"` -} - -// HasCertAndKey returns whether a keypair contains cert and key of non-zero length. 
-func (kp *KeyPair) HasCertAndKey() bool { - return len(kp.Cert) != 0 && len(kp.Key) != 0 -} - -type ExternalRouterIPParam struct { - // The FixedIP in the corresponding subnet - FixedIP string `json:"fixedIP,omitempty"` - // The subnet in which the FixedIP is used for the Gateway of this router - Subnet SubnetParam `json:"subnet"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenstackClusterProviderStatus contains the status fields -// relevant to OpenStack in the cluster object. -// +k8s:openapi-gen=true -type OpenstackClusterProviderStatus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Network contains all information about the created OpenStack Network. - // It includes Subnets and Router. - Network *Network `json:"network,omitempty"` - - // ControlPlaneSecurityGroups contains all the information about the OpenStack - // Security Group that needs to be applied to control plane nodes. - // TODO: Maybe instead of two properties, we add a property to the group? - ControlPlaneSecurityGroup *SecurityGroup `json:"controlPlaneSecurityGroup,omitempty"` - - // GlobalSecurityGroup contains all the information about the OpenStack Security - // Group that needs to be applied to all nodes, both control plane and worker nodes. - GlobalSecurityGroup *SecurityGroup `json:"globalSecurityGroup,omitempty"` -} - -// Network represents basic information about the associated OpenStach Neutron Network -type Network struct { - Name string `json:"name"` - ID string `json:"id"` - - Subnet *Subnet `json:"subnet,omitempty"` - Router *Router `json:"router,omitempty"` - - // Be careful when using APIServerLoadBalancer, because this field is optional and therefore not - // set in all cases - APIServerLoadBalancer *LoadBalancer `json:"apiServerLoadBalancer,omitempty"` -} - -// Subnet represents basic information about the associated OpenStack Neutron Subnet -type Subnet struct { - Name string `json:"name"` - ID string `json:"id"` - - CIDR string `json:"cidr"` -} - -// Router represents basic information about the associated OpenStack Neutron Router -type Router struct { - Name string `json:"name"` - ID string `json:"id"` -} - -// LoadBalancer represents basic information about the associated OpenStack LoadBalancer -type LoadBalancer struct { - Name string `json:"name"` - ID string `json:"id"` - IP string `json:"ip"` - InternalIP string `json:"internalIP"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OpenstackClusterProviderStatus contains the status fields -// relevant to OpenStack in the cluster object. 
-// +k8s:openapi-gen=true -type OpenstackMachineProviderStatus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - //TODO: add provider status -} - -func init() { - SchemeBuilder.Register(&OpenstackProviderSpec{}) - SchemeBuilder.Register(&OpenstackClusterProviderSpec{}) - SchemeBuilder.Register(&OpenstackClusterProviderStatus{}) - SchemeBuilder.Register(&OpenstackMachineProviderStatus{}) -} diff --git a/pkg/cloud/openstack/cluster/actuator.go b/pkg/cloud/openstack/cluster/actuator.go deleted file mode 100644 index f8c7621a50..0000000000 --- a/pkg/cloud/openstack/cluster/actuator.go +++ /dev/null @@ -1,267 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "github.com/pkg/errors" - apiv1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/record" - "k8s.io/klog" - "reflect" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/loadbalancer" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/networking" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/provider" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/deployer" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientclusterv1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/controller/remote" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/patch" - "strconv" - "strings" -) - -// Actuator controls cluster related infrastructure. -type Actuator struct { - *deployer.Deployer - - params ActuatorParams -} - -// ActuatorParams holds parameter information for Actuator -type ActuatorParams struct { - KubeClient kubernetes.Interface - Client client.Client - ClusterClient clientclusterv1.ClusterV1alpha1Interface - EventRecorder record.EventRecorder - Scheme *runtime.Scheme -} - -// NewActuator creates a new Actuator -func NewActuator(params ActuatorParams) *Actuator { - return &Actuator{ - Deployer: deployer.New(), - params: params, - } -} - -// Reconcile creates or applies updates to the cluster. -func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error { - - if cluster == nil { - return fmt.Errorf("the cluster is nil, check your cluster configuration") - } - klog.Infof("Reconciling Cluster %s/%s", cluster.Namespace, cluster.Name) - - // ClusterCopy is used for patch generation during storeCluster - clusterCopy := cluster.DeepCopy() - clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) - - osProviderClient, clientOpts, err := provider.NewClientFromCluster(a.params.KubeClient, cluster) - if err != nil { - return err - } - - networkingService, err := networking.NewService(osProviderClient, clientOpts) - if err != nil { - return err - } - - certificatesService := certificates.NewService() - - // Load provider spec & status. 
- clusterProviderSpec, clusterProviderStatus, err := providerv1.ClusterSpecAndStatusFromProviderSpec(cluster) - if err != nil { - return err - } - loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, clusterProviderSpec.UseOctavia) - if err != nil { - return err - } - - defer func() { - if err := a.storeCluster(cluster, clusterCopy, clusterProviderSpec, clusterProviderStatus); err != nil { - klog.Errorf("Failed to store cluster %q in namespace %q: %v", cluster.Name, cluster.Namespace, err) - } - }() - - klog.Infof("Reconciling certificates for cluster %s", clusterName) - // Store cert material in spec. - if err := certificatesService.ReconcileCertificates(clusterName, clusterProviderSpec); err != nil { - return errors.Wrapf(err, "failed to reconcile certificates for cluster %q", cluster.Name) - } - - klog.Infof("Reconciling network components for cluster %s", clusterName) - if clusterProviderSpec.NodeCIDR == "" { - klog.V(4).Infof("No need to reconcile network for cluster %s", clusterName) - } else { - err := networkingService.ReconcileNetwork(clusterName, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return errors.Errorf("failed to reconcile network: %v", err) - } - err = networkingService.ReconcileSubnet(clusterName, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return errors.Errorf("failed to reconcile subnets: %v", err) - } - err = networkingService.ReconcileRouter(clusterName, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return errors.Errorf("failed to reconcile router: %v", err) - } - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - err = loadbalancerService.ReconcileLoadBalancer(clusterName, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return errors.Errorf("failed to reconcile load balancer: %v", err) - } - } - } - - err = networkingService.ReconcileSecurityGroups(clusterName, *clusterProviderSpec, clusterProviderStatus) - if err != nil { - return errors.Errorf("failed to reconcile security groups: %v", err) - } - - // Store KubeConfig for Cluster API NodeRef controller to use. 
- kubeConfigSecretName := remote.KubeConfigSecretName(cluster.Name) - if _, err := a.params.KubeClient.CoreV1().Secrets(cluster.Namespace).Get(kubeConfigSecretName, metav1.GetOptions{}); err != nil && apierrors.IsNotFound(err) { - kubeConfig, err := a.Deployer.GetKubeConfig(cluster, nil) - if err != nil { - return errors.Wrapf(err, "failed to get kubeconfig for cluster %q", cluster.Name) - } - - kubeConfigSecret := &apiv1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeConfigSecretName, - }, - StringData: map[string]string{ - "value": kubeConfig, - }, - } - - if _, err := a.params.KubeClient.CoreV1().Secrets(cluster.Namespace).Create(kubeConfigSecret); err != nil { - return errors.Wrapf(err, "failed to create kubeconfig secret for cluster %q", cluster.Name) - } - } else if err != nil { - return errors.Wrapf(err, "failed to get kubeconfig secret for cluster %q", cluster.Name) - } - - klog.Infof("Reconciled Cluster %s/%s successfully", cluster.Namespace, cluster.Name) - return nil -} - -// Delete deletes a cluster and is invoked by the Cluster Controller -func (a *Actuator) Delete(cluster *clusterv1.Cluster) error { - klog.Infof("Deleting Cluster %s/%s", cluster.Namespace, cluster.Name) - clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name) - osProviderClient, clientOpts, err := provider.NewClientFromCluster(a.params.KubeClient, cluster) - if err != nil { - return err - } - - networkingService, err := networking.NewService(osProviderClient, clientOpts) - if err != nil { - return err - } - - // Load provider spec & status. - clusterProviderSpec, clusterProviderStatus, err := providerv1.ClusterSpecAndStatusFromProviderSpec(cluster) - if err != nil { - return err - } - - loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, clusterProviderSpec.UseOctavia) - if err != nil { - return err - } - - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - err = loadbalancerService.DeleteLoadBalancer(clusterName, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return errors.Errorf("failed to delete load balancer: %v", err) - } - } - - // Delete other things - if clusterProviderStatus.GlobalSecurityGroup != nil { - klog.Infof("Deleting global security group %q", clusterProviderStatus.GlobalSecurityGroup.Name) - err := networkingService.DeleteSecurityGroups(clusterProviderStatus.GlobalSecurityGroup) - if err != nil { - return errors.Errorf("failed to delete security group: %v", err) - } - } - - if clusterProviderStatus.ControlPlaneSecurityGroup != nil { - klog.Infof("Deleting control plane security group %q", clusterProviderStatus.ControlPlaneSecurityGroup.Name) - err := networkingService.DeleteSecurityGroups(clusterProviderStatus.ControlPlaneSecurityGroup) - if err != nil { - return errors.Errorf("failed to delete security group: %v", err) - } - } - return nil -} - -func (a *Actuator) storeCluster(cluster *clusterv1.Cluster, clusterCopy *clusterv1.Cluster, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec, clusterProviderStatus *providerv1.OpenstackClusterProviderStatus) error { - - rawSpec, rawStatus, err := providerv1.EncodeClusterSpecAndStatus(cluster, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return err - } - - cluster.Spec.ProviderSpec.Value = rawSpec - - // Build a patch and marshal that patch to something the client will understand. 
- p, err := patch.NewJSONPatch(clusterCopy, cluster) - if err != nil { - return fmt.Errorf("failed to create new JSONPatch: %v", err) - } - - clusterClient := a.params.ClusterClient.Clusters(cluster.Namespace) - - // Do not update Cluster if nothing has changed - if len(p) != 0 { - pb, err := json.MarshalIndent(p, "", " ") - if err != nil { - return fmt.Errorf("failed to json marshal patch: %v", err) - } - klog.Infof("Patching cluster %s", cluster.Name) - result, err := clusterClient.Patch(cluster.Name, types.JSONPatchType, pb) - if err != nil { - return fmt.Errorf("failed to patch cluster: %v", err) - } - // Keep the resource version updated so the status update can succeed - cluster.ResourceVersion = result.ResourceVersion - } - - controlPlaneURI := strings.Split(clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint, ":") - apiServerHost := controlPlaneURI[0] - apiServerPortStr := controlPlaneURI[1] - apiServerPort, err := strconv.Atoi(apiServerPortStr) - if err != nil { - return fmt.Errorf("could not parse port of controlPlaneEndpoint %s: %v", clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint, err) - } - - // Check if API endpoints is not set or has changed. - if len(cluster.Status.APIEndpoints) == 0 || cluster.Status.APIEndpoints[0].Host != apiServerHost { - cluster.Status.APIEndpoints = []clusterv1.APIEndpoint{ - { - Host: apiServerHost, - Port: apiServerPort, - }, - } - } - - cluster.Status.ProviderStatus = rawStatus - if !reflect.DeepEqual(cluster.Status, clusterCopy.Status) { - klog.Infof("Updating cluster status %s", cluster.Name) - if _, err := clusterClient.UpdateStatus(cluster); err != nil { - return fmt.Errorf("failed to update cluster status: %v", err) - } - } - return nil -} diff --git a/pkg/cloud/openstack/contants/constants.go b/pkg/cloud/openstack/contants/constants.go deleted file mode 100644 index a7d3b4bf49..0000000000 --- a/pkg/cloud/openstack/contants/constants.go +++ /dev/null @@ -1,6 +0,0 @@ -package constants - -const ( - OpenstackIPAnnotationKey = "openstack-ip-address" - OpenstackIdAnnotationKey = "openstack-resourceId" -) diff --git a/pkg/cloud/openstack/machine/actuator.go b/pkg/cloud/openstack/machine/actuator.go deleted file mode 100644 index 80e801486c..0000000000 --- a/pkg/cloud/openstack/machine/actuator.go +++ /dev/null @@ -1,512 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package machine - -import ( - "context" - "encoding/json" - "fmt" - "k8s.io/client-go/kubernetes" - "net" - "os" - "reflect" - constants "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/contants" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/compute" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/loadbalancer" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/networking" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/provider" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/userdata" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/deployer" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/runtime" - clientgorecord "k8s.io/client-go/tools/record" - "k8s.io/klog" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientclusterv1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - apierrors "sigs.k8s.io/cluster-api/pkg/errors" - "sigs.k8s.io/cluster-api/pkg/util" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - TimeoutInstanceCreate = 5 - TimeoutInstanceDelete = 5 - RetryIntervalInstanceStatus = 10 * time.Second -) - -type Actuator struct { - *deployer.Deployer - - params ActuatorParams - scheme *runtime.Scheme - client client.Client -} - -// ActuatorParams holds parameter information for Actuator -type ActuatorParams struct { - KubeClient kubernetes.Interface - Client client.Client - ClusterClient clientclusterv1.ClusterV1alpha1Interface - EventRecorder clientgorecord.EventRecorder - Scheme *runtime.Scheme -} - -func NewActuator(params ActuatorParams) *Actuator { - return &Actuator{ - Deployer: deployer.New(), - params: params, - client: params.Client, - scheme: params.Scheme, - } -} - -func (a *Actuator) Create(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - - if cluster == nil { - return fmt.Errorf("the cluster is nil, check your cluster configuration") - } - klog.Infof("Creating Machine %s/%s: %s", cluster.Namespace, cluster.Name, machine.Name) - - clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name) - - osProviderClient, clientOpts, err := provider.NewClientFromMachine(a.params.KubeClient, machine) - if err != nil { - return err - } - - computeService, err := compute.NewService(osProviderClient, clientOpts) - if err != nil { - return err - } - - networkingService, err := networking.NewService(osProviderClient, clientOpts) - if err != nil { - return err - } - - clusterProviderSpec, clusterProviderStatus, err := providerv1.ClusterSpecAndStatusFromProviderSpec(cluster) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "error creating Openstack instance: %v", err)) - } - machineProviderSpec, err := providerv1.MachineSpecFromProviderSpec(machine.Spec.ProviderSpec) - if err != nil { - return a.handleMachineError(machine, apierrors.InvalidMachineConfiguration( - "Cannot unmarshal machineProviderSpec field: %v", err)) - } - - if vErr := a.validateMachine(machine, machineProviderSpec); vErr != nil { - return a.handleMachineError(machine, vErr) - } - - instance, err := a.instanceExists(machine) - if err != nil { - return err - } - // We're skipping server creation if it already exists. 
FloatingIP and LoadBalancer member should be reconciled anyway. - if instance != nil { - klog.Infof("Skipped creating a VM that already exists.\n") - } else { - userData, err := userdata.GetUserData(a.params.Client, a.params.KubeClient, machineProviderSpec, cluster, machine) - if err != nil { - if machineError, ok := err.(*apierrors.MachineError); ok { - return a.handleMachineError(machine, machineError) - } - return err - } - - instance, err = computeService.InstanceCreate(clusterName, machine.Name, clusterProviderSpec, machineProviderSpec, userData, machineProviderSpec.KeyName) - - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "error creating Openstack instance: %v", err)) - } - instanceCreateTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT", TimeoutInstanceCreate) - instanceCreateTimeout = instanceCreateTimeout * time.Minute - err = util.PollImmediate(RetryIntervalInstanceStatus, instanceCreateTimeout, func() (bool, error) { - instance, err := computeService.GetInstance(instance.ID) - if err != nil { - return false, nil - } - return instance.Status == "ACTIVE", nil - }) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "error creating Openstack instance: %v", err)) - } - } - - if machineProviderSpec.FloatingIP != "" { - err := networkingService.GetOrCreateFloatingIP(clusterProviderSpec, machineProviderSpec.FloatingIP) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "Create floatingIP err: %v", err)) - } - - err = computeService.AssociateFloatingIP(instance.ID, machineProviderSpec.FloatingIP) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "Associate floatingIP err: %v", err)) - } - } - - loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, clusterProviderSpec.UseOctavia) - if err != nil { - return err - } - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - err := loadbalancerService.ReconcileLoadBalancerMember(clusterName, machine, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "Reconcile LoadBalancer Member err: %v", err)) - } - } - - // update Annotation below will store machine spec - providerID := fmt.Sprintf("openstack:////%s", instance.ID) - machine.Spec.ProviderID = &providerID - - klog.Infof("updating status of machine of %s", machine.Name) - ext, _ := providerv1.EncodeMachineStatus(&providerv1.OpenstackMachineProviderStatus{}) - machine.Status.ProviderStatus = ext - err = a.updateMachine(cluster, machine) - if err != nil { - klog.Infof("updated status of machine failed: %v", err) - } - - record.Eventf(machine, "CreatedInstance", "Created new instance with id: %s", instance.ID) - err = a.updateAnnotation(machine, instance.ID) - if err != nil { - return err - } - - klog.Infof("Created Machine %s/%s: %s successfully", cluster.Namespace, cluster.Name, machine.Name) - return nil -} - -func (a *Actuator) updateMachine(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - machineClient := a.params.ClusterClient.Machines(cluster.Namespace) - _, err := machineClient.UpdateStatus(machine) - return err -} - -func (a *Actuator) Delete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - - if cluster == nil { - return fmt.Errorf("the cluster is nil, check your cluster configuration") - } - klog.Infof("Deleting Machine %s/%s: %s", cluster.Namespace, cluster.Name, 
machine.Name) - - clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name) - - osProviderClient, clientOpts, err := provider.NewClientFromMachine(a.params.KubeClient, machine) - if err != nil { - return err - } - - computeService, err := compute.NewService(osProviderClient, clientOpts) - if err != nil { - return err - } - - clusterProviderSpec, clusterProviderStatus, err := providerv1.ClusterSpecAndStatusFromProviderSpec(cluster) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "error updating Openstack instance: %v", err)) - } - - loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, clusterProviderSpec.UseOctavia) - if err != nil { - return err - } - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - err = loadbalancerService.DeleteLoadBalancerMember(clusterName, machine, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return err - } - } - - instance, err := a.instanceExists(machine) - if err != nil { - return err - } - - if instance == nil { - klog.Infof("Skipped deleting %s that is already deleted.\n", machine.Name) - return nil - } - - id := machine.ObjectMeta.Annotations[constants.OpenstackIdAnnotationKey] - err = computeService.InstanceDelete(id) - if err != nil { - return a.handleMachineError(machine, apierrors.DeleteMachine( - "error deleting Openstack instance: %v", err)) - } - record.Eventf(machine, "DeletedInstance", "Deleted instance with id: %s", instance.ID) - return nil -} - -func (a *Actuator) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - - if cluster == nil { - return fmt.Errorf("the cluster is nil, check your cluster configuration") - } - klog.Infof("Updating Machine %s/%s: %s", cluster.Namespace, cluster.Name, machine.Name) - - clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name) - - osProviderClient, clientOpts, err := provider.NewClientFromMachine(a.params.KubeClient, machine) - if err != nil { - return err - } - - clusterProviderSpec, clusterProviderStatus, err := providerv1.ClusterSpecAndStatusFromProviderSpec(cluster) - if err != nil { - return a.handleMachineError(machine, apierrors.CreateMachine( - "error updating Openstack instance: %v", err)) - } - - status, err := a.instanceStatus(machine) - if err != nil { - return err - } - - // FIXME: Sometimes the master node ProviderStatus update of bootstrap cluster didn't reflected on the new cluster - klog.Infof("updating status of machine of %s", machine.Name) - ext, _ := providerv1.EncodeMachineStatus(&providerv1.OpenstackMachineProviderStatus{}) - machine.Status.ProviderStatus = ext - err = a.updateMachine(cluster, machine) - if err != nil { - klog.Infof("updated status of machine failed: %v", err) - } - var machineAlreadyExists bool - currentMachine := (*clusterv1.Machine)(status) - if currentMachine == nil { - instance, err := a.instanceExists(machine) - if err != nil { - return err - } - if instance != nil && instance.Status == "ACTIVE" { - klog.Infof("Populating current state for boostrap machine %v", machine.ObjectMeta.Name) - err = a.updateAnnotation(machine, instance.ID) - if err != nil { - return err - } - machineAlreadyExists = true - } else { - return fmt.Errorf("cannot retrieve current state to update machine %v", machine.ObjectMeta.Name) - } - } - - if !requiresUpdate(currentMachine, machine) { - machineAlreadyExists = true - } - - if !machineAlreadyExists { - if util.IsControlPlaneMachine(currentMachine) { - // TODO: add 
master inplace - klog.Errorf("master inplace update failed: not support master in place update now") - } else { - klog.Infof("re-creating machine %s for update.", currentMachine.ObjectMeta.Name) - err = a.Delete(ctx, cluster, currentMachine) - if err != nil { - klog.Errorf("delete machine %s for update failed: %v", currentMachine.ObjectMeta.Name, err) - } else { - instanceDeleteTimeout := getTimeout("CLUSTER_API_OPENSTACK_INSTANCE_DELETE_TIMEOUT", TimeoutInstanceDelete) - instanceDeleteTimeout = instanceDeleteTimeout * time.Minute - err = util.PollImmediate(RetryIntervalInstanceStatus, instanceDeleteTimeout, func() (bool, error) { - instance, err := a.instanceExists(machine) - if err != nil { - return false, nil - } - return instance == nil, nil - }) - if err != nil { - return a.handleMachineError(machine, apierrors.DeleteMachine( - "error deleting Openstack instance: %v", err)) - } - - err = a.Create(ctx, cluster, machine) - if err != nil { - klog.Errorf("create machine %s for update failed: %v", machine.ObjectMeta.Name, err) - } - klog.Infof("Successfully updated machine %s", currentMachine.ObjectMeta.Name) - } - } - } - loadbalancerService, err := loadbalancer.NewService(osProviderClient, clientOpts, clusterProviderSpec.UseOctavia) - if err != nil { - return err - } - - if clusterProviderSpec.ManagedAPIServerLoadBalancer { - err = loadbalancerService.ReconcileLoadBalancerMember(clusterName, machine, clusterProviderSpec, clusterProviderStatus) - if err != nil { - return err - } - } - - return nil -} - -func (a *Actuator) Exists(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { - instance, err := a.instanceExists(machine) - if err != nil { - return false, err - } - - if (instance != nil) && (machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "") { - providerID := fmt.Sprintf("openstack:////%s", instance.ID) - machine.Spec.ProviderID = &providerID - - a.client.Update(nil, machine) - } - return instance != nil, err -} - -func getIPFromInstance(instance *compute.Instance) (string, error) { - if instance.AccessIPv4 != "" && net.ParseIP(instance.AccessIPv4) != nil { - return instance.AccessIPv4, nil - } - type networkInterface struct { - Address string `json:"addr"` - Version float64 `json:"version"` - Type string `json:"OS-EXT-IPS:type"` - } - var addrList []string - - for _, b := range instance.Addresses { - list, err := json.Marshal(b) - if err != nil { - return "", fmt.Errorf("extract IP from instance err: %v", err) - } - var networks []interface{} - json.Unmarshal(list, &networks) - for _, network := range networks { - var netInterface networkInterface - b, _ := json.Marshal(network) - json.Unmarshal(b, &netInterface) - if netInterface.Version == 4.0 { - if netInterface.Type == "floating" { - return netInterface.Address, nil - } - addrList = append(addrList, netInterface.Address) - } - } - } - if len(addrList) != 0 { - return addrList[0], nil - } - return "", fmt.Errorf("extract IP from instance err") -} - -// If the Actuator has a client for updating Machine objects, this will set -// the appropriate reason/message on the Machine.Status. If not, such as during -// cluster installation, it will operate as a no-op. It also returns the -// original error for convenience, so callers can do "return handleMachineError(...)". 
-func (a *Actuator) handleMachineError(machine *clusterv1.Machine, err *apierrors.MachineError) error { - if a.client != nil { - reason := err.Reason - message := err.Message - machine.Status.ErrorReason = &reason - machine.Status.ErrorMessage = &message - if err := a.client.Update(nil, machine); err != nil { - return fmt.Errorf("unable to update machine status: %v", err) - } - } - - klog.Errorf("Machine error %s: %v", machine.Name, err.Message) - return err -} - -func (a *Actuator) updateAnnotation(machine *clusterv1.Machine, id string) error { - if machine.ObjectMeta.Annotations == nil { - machine.ObjectMeta.Annotations = make(map[string]string) - } - machine.ObjectMeta.Annotations[constants.OpenstackIdAnnotationKey] = id - instance, _ := a.instanceExists(machine) - ip, err := getIPFromInstance(instance) - if err != nil { - return err - } - machine.ObjectMeta.Annotations[constants.OpenstackIPAnnotationKey] = ip - if err := a.client.Update(nil, machine); err != nil { - return err - } - return a.updateInstanceStatus(machine) -} - -func requiresUpdate(current *clusterv1.Machine, new *clusterv1.Machine) bool { - if current == nil || new == nil { - return true - } - // Do not want status changes. Do want changes that impact machine provisioning - return !reflect.DeepEqual(current.Spec.ObjectMeta, new.Spec.ObjectMeta) || - !reflect.DeepEqual(current.Spec.ProviderSpec, new.Spec.ProviderSpec) || - !reflect.DeepEqual(current.Spec.Versions, new.Spec.Versions) || - current.ObjectMeta.Name != new.ObjectMeta.Name -} - -func (a *Actuator) instanceExists(machine *clusterv1.Machine) (instance *compute.Instance, err error) { - machineSpec, err := providerv1.MachineSpecFromProviderSpec(machine.Spec.ProviderSpec) - if err != nil { - return nil, err - } - opts := &compute.InstanceListOpts{ - Name: machine.Name, - Image: machineSpec.Image, - Flavor: machineSpec.Flavor, - } - - osProviderClient, clientOpts, err := provider.NewClientFromMachine(a.params.KubeClient, machine) - if err != nil { - return nil, err - } - - computeService, err := compute.NewService(osProviderClient, clientOpts) - if err != nil { - return nil, err - } - - instanceList, err := computeService.GetInstanceList(opts) - if err != nil { - return nil, err - } - if len(instanceList) == 0 { - return nil, nil - } - return instanceList[0], nil -} - -func (a *Actuator) validateMachine(machine *clusterv1.Machine, config *providerv1.OpenstackProviderSpec) *apierrors.MachineError { - // TODO: other validate of openstackCloud - return nil -} - -func getTimeout(name string, timeout int) time.Duration { - if v := os.Getenv(name); v != "" { - timeout, err := strconv.Atoi(v) - if err == nil { - return time.Duration(timeout) - } - } - return time.Duration(timeout) -} diff --git a/pkg/cloud/openstack/machine/instancestatus.go b/pkg/cloud/openstack/machine/instancestatus.go deleted file mode 100644 index 5545f19436..0000000000 --- a/pkg/cloud/openstack/machine/instancestatus.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machine - -import ( - "bytes" - "fmt" - - "k8s.io/apimachinery/pkg/runtime/serializer/json" - - "k8s.io/apimachinery/pkg/runtime/schema" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/util" -) - -// Long term, we should retrieve the current status by asking k8s, openstack etc. for all the needed info. -// For now, it is stored in the matching CRD under an annotation. This is similar to -// the spec and status concept where the machine CRD is the instance spec and the annotation is the instance status. - -const InstanceStatusAnnotationKey = "instance-status" - -type instanceStatus *clusterv1.Machine - -// Get the status of the instance identified by the given machine -func (a *Actuator) instanceStatus(machine *clusterv1.Machine) (instanceStatus, error) { - currentMachine, err := util.GetMachineIfExists(a.client, machine.Namespace, machine.Name) - if err != nil { - return nil, err - } - - if currentMachine == nil { - // The current status no longer exists because the matching CRD has been deleted (or does not exist yet ie. bootstrapping) - return nil, nil - } - return a.machineInstanceStatus(currentMachine) -} - -// Sets the status of the instance identified by the given machine to the given machine -func (a *Actuator) updateInstanceStatus(machine *clusterv1.Machine) error { - status := instanceStatus(machine) - currentMachine, err := util.GetMachineIfExists(a.client, machine.Namespace, machine.Name) - if err != nil { - return err - } - - if currentMachine == nil { - // The current status no longer exists because the matching CRD has been deleted. - return fmt.Errorf("Machine has already been deleted. Cannot update current instance status for machine %v", machine.ObjectMeta.Name) - } - - m, err := a.setMachineInstanceStatus(currentMachine, status) - if err != nil { - return err - } - - return a.client.Update(nil, m) -} - -// Gets the state of the instance stored on the given machine CRD -func (a *Actuator) machineInstanceStatus(machine *clusterv1.Machine) (instanceStatus, error) { - if machine.ObjectMeta.Annotations == nil { - // No state - return nil, nil - } - - status := machine.ObjectMeta.Annotations[InstanceStatusAnnotationKey] - if status == "" { - // No state - return nil, nil - } - - serializer := json.NewSerializer(json.DefaultMetaFactory, a.scheme, a.scheme, false) - var statusMachine clusterv1.Machine - _, _, err := serializer.Decode([]byte(status), &schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "Machine"}, &statusMachine) - if err != nil { - return nil, fmt.Errorf("decoding failure: %v", err) - } - - return instanceStatus(&statusMachine), nil -} - -// Applies the state of an instance onto a given machine CRD -func (a *Actuator) setMachineInstanceStatus(machine *clusterv1.Machine, status instanceStatus) (*clusterv1.Machine, error) { - // Avoid status within status within status ... 
- status.ObjectMeta.Annotations[InstanceStatusAnnotationKey] = "" - - serializer := json.NewSerializer(json.DefaultMetaFactory, a.scheme, a.scheme, false) - b := []byte{} - buff := bytes.NewBuffer(b) - err := serializer.Encode((*clusterv1.Machine)(status), buff) - if err != nil { - return nil, fmt.Errorf("encoding failure: %v", err) - } - - if machine.ObjectMeta.Annotations == nil { - machine.ObjectMeta.Annotations = make(map[string]string) - } - machine.ObjectMeta.Annotations[InstanceStatusAnnotationKey] = buff.String() - return machine, nil -} diff --git a/pkg/cloud/openstack/services/certificates/BUILD.bazel b/pkg/cloud/openstack/services/certificates/BUILD.bazel deleted file mode 100644 index 6dc36a93a2..0000000000 --- a/pkg/cloud/openstack/services/certificates/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "certificates.go", - "service.go", - ], - importpath = "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/aws/services/certificates", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/awsprovider/v1alpha1:go_default_library", - "//pkg/cloud/aws/actuators:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["certificates_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/awsprovider/v1alpha1:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - ], -) diff --git a/pkg/cloud/openstack/services/certificates/certificates.go b/pkg/cloud/openstack/services/certificates/certificates.go index 9f9e3ae8ed..aad15dd3c1 100644 --- a/pkg/cloud/openstack/services/certificates/certificates.go +++ b/pkg/cloud/openstack/services/certificates/certificates.go @@ -31,7 +31,7 @@ import ( "math" "math/big" "net" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" "strings" "time" ) @@ -67,51 +67,51 @@ type Config struct { } // ReconcileCertificates generate certificates if none exists. 
-func (s *Service) ReconcileCertificates(clusterName string, clusterProviderSpec *v1alpha1.OpenstackClusterProviderSpec) error { - if !clusterProviderSpec.CAKeyPair.HasCertAndKey() { +func (s *Service) ReconcileCertificates(clusterName string, openStackCluster *infrav1.OpenStackCluster) error { + if !openStackCluster.Spec.CAKeyPair.HasCertAndKey() { klog.Infof("Generating keypair for user %s", clusterCA) - clusterCAKeyPair, err := generateCACert(&clusterProviderSpec.CAKeyPair, clusterCA) + clusterCAKeyPair, err := generateCACert(&openStackCluster.Spec.CAKeyPair, clusterCA) if err != nil { return errors.Wrapf(err, "Failed to generate certs for %q", clusterCA) } - clusterProviderSpec.CAKeyPair = clusterCAKeyPair + openStackCluster.Spec.CAKeyPair = clusterCAKeyPair } - if !clusterProviderSpec.EtcdCAKeyPair.HasCertAndKey() { + if !openStackCluster.Spec.EtcdCAKeyPair.HasCertAndKey() { klog.Infof("Generating keypair for user %s", etcdCA) - etcdCAKeyPair, err := generateCACert(&clusterProviderSpec.EtcdCAKeyPair, etcdCA) + etcdCAKeyPair, err := generateCACert(&openStackCluster.Spec.EtcdCAKeyPair, etcdCA) if err != nil { return errors.Wrapf(err, "Failed to generate certs for %q", etcdCA) } - clusterProviderSpec.EtcdCAKeyPair = etcdCAKeyPair + openStackCluster.Spec.EtcdCAKeyPair = etcdCAKeyPair } - if !clusterProviderSpec.FrontProxyCAKeyPair.HasCertAndKey() { + if !openStackCluster.Spec.FrontProxyCAKeyPair.HasCertAndKey() { klog.Infof("Generating keypair for user %s", frontProxyCA) - fpCAKeyPair, err := generateCACert(&clusterProviderSpec.FrontProxyCAKeyPair, frontProxyCA) + fpCAKeyPair, err := generateCACert(&openStackCluster.Spec.FrontProxyCAKeyPair, frontProxyCA) if err != nil { return errors.Wrapf(err, "Failed to generate certs for %q", frontProxyCA) } - clusterProviderSpec.FrontProxyCAKeyPair = fpCAKeyPair + openStackCluster.Spec.FrontProxyCAKeyPair = fpCAKeyPair } - if !clusterProviderSpec.SAKeyPair.HasCertAndKey() { + if !openStackCluster.Spec.SAKeyPair.HasCertAndKey() { klog.Infof("Generating service account keys for user %s", serviceAccount) - saKeyPair, err := generateServiceAccountKeys(&clusterProviderSpec.SAKeyPair, serviceAccount) + saKeyPair, err := generateServiceAccountKeys(&openStackCluster.Spec.SAKeyPair, serviceAccount) if err != nil { return errors.Wrapf(err, "Failed to generate keyPair for %q", serviceAccount) } - clusterProviderSpec.SAKeyPair = saKeyPair + openStackCluster.Spec.SAKeyPair = saKeyPair } return nil } -func generateCACert(kp *v1alpha1.KeyPair, user string) (v1alpha1.KeyPair, error) { +func generateCACert(kp *infrav1.KeyPair, user string) (infrav1.KeyPair, error) { x509Cert, privKey, err := NewCertificateAuthority() if err != nil { - return v1alpha1.KeyPair{}, errors.Wrapf(err, "failed to generate CA cert for %q", user) + return infrav1.KeyPair{}, errors.Wrapf(err, "failed to generate CA cert for %q", user) } if kp == nil { - return v1alpha1.KeyPair{ + return infrav1.KeyPair{ Cert: EncodeCertPEM(x509Cert), Key: EncodePrivateKeyPEM(privKey), }, nil @@ -121,17 +121,17 @@ func generateCACert(kp *v1alpha1.KeyPair, user string) (v1alpha1.KeyPair, error) return *kp, nil } -func generateServiceAccountKeys(kp *v1alpha1.KeyPair, user string) (v1alpha1.KeyPair, error) { +func generateServiceAccountKeys(kp *infrav1.KeyPair, user string) (infrav1.KeyPair, error) { saCreds, err := NewPrivateKey() if err != nil { - return v1alpha1.KeyPair{}, errors.Wrapf(err, "failed to create service account public and private keys") + return infrav1.KeyPair{}, errors.Wrapf(err, "failed to 
create service account public and private keys") } saPub, err := EncodePublicKeyPEM(&saCreds.PublicKey) if err != nil { - return v1alpha1.KeyPair{}, errors.Wrapf(err, "failed to encode service account public key to PEM") + return infrav1.KeyPair{}, errors.Wrapf(err, "failed to encode service account public key to PEM") } if kp == nil { - return v1alpha1.KeyPair{ + return infrav1.KeyPair{ Cert: saPub, Key: EncodePrivateKeyPEM(saCreds), }, nil diff --git a/pkg/cloud/openstack/services/certificates/certificates_test.go b/pkg/cloud/openstack/services/certificates/certificates_test.go index 0f687a9755..608d1be68a 100644 --- a/pkg/cloud/openstack/services/certificates/certificates_test.go +++ b/pkg/cloud/openstack/services/certificates/certificates_test.go @@ -18,16 +18,16 @@ package certificates import ( "crypto/x509" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" "testing" "github.com/pkg/errors" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" ) func TestGenerateCACert(t *testing.T) { testCases := []struct { name string - inputKeyPair *v1alpha1.KeyPair + inputKeyPair *infrav1.KeyPair inputUser string expectKeyPairGen bool expectedError error @@ -41,28 +41,28 @@ func TestGenerateCACert(t *testing.T) { }, { name: "should generate keypair when inputKeyPair has no cert", - inputKeyPair: &v1alpha1.KeyPair{Key: []byte("foo-key")}, + inputKeyPair: &infrav1.KeyPair{Key: []byte("foo-key")}, inputUser: "foo-ca", expectKeyPairGen: true, expectedError: nil, }, { name: "should generate keypair when inputKeyPair has no key", - inputKeyPair: &v1alpha1.KeyPair{Cert: []byte("foo-cert")}, + inputKeyPair: &infrav1.KeyPair{Cert: []byte("foo-cert")}, inputUser: "foo-ca", expectKeyPairGen: true, expectedError: nil, }, { name: "should generate keypair when inputKeyPair has no cert and nokey", - inputKeyPair: &v1alpha1.KeyPair{}, + inputKeyPair: &infrav1.KeyPair{}, inputUser: "foo-ca", expectKeyPairGen: true, expectedError: nil, }, { name: "should not generate keypair when inputKeyPair has cert and key", - inputKeyPair: &v1alpha1.KeyPair{Cert: []byte("foo-cert"), Key: []byte("foo-key")}, + inputKeyPair: &infrav1.KeyPair{Cert: []byte("foo-cert"), Key: []byte("foo-key")}, inputUser: "foo-ca", expectKeyPairGen: false, expectedError: nil, @@ -99,7 +99,7 @@ func TestGenerateCACert(t *testing.T) { func TestGenerateServiceAccountKeys(t *testing.T) { testCases := []struct { name string - inputKeyPair *v1alpha1.KeyPair + inputKeyPair *infrav1.KeyPair inputUser string expectKeyPairGen bool expectedError error @@ -113,28 +113,28 @@ func TestGenerateServiceAccountKeys(t *testing.T) { }, { name: "should generate keypair when inputKeyPair has no cert", - inputKeyPair: &v1alpha1.KeyPair{Key: []byte("foo-key")}, + inputKeyPair: &infrav1.KeyPair{Key: []byte("foo-key")}, inputUser: "foo-sa", expectKeyPairGen: true, expectedError: nil, }, { name: "should generate keypair when inputKeyPair has no key", - inputKeyPair: &v1alpha1.KeyPair{Cert: []byte("foo-cert")}, + inputKeyPair: &infrav1.KeyPair{Cert: []byte("foo-cert")}, inputUser: "foo-sa", expectKeyPairGen: true, expectedError: nil, }, { name: "should generate keypair when inputKeyPair has no cert and nokey", - inputKeyPair: &v1alpha1.KeyPair{}, + inputKeyPair: &infrav1.KeyPair{}, inputUser: "foo-ca", expectKeyPairGen: true, expectedError: nil, }, { name: "should not generate keypair when inputKeyPair has cert and key", - inputKeyPair: &v1alpha1.KeyPair{Cert: []byte("foo-cert"), Key: []byte("foo-key")}, + inputKeyPair: 
&infrav1.KeyPair{Cert: []byte("foo-cert"), Key: []byte("foo-key")}, inputUser: "foo-sa", expectKeyPairGen: false, expectedError: nil, diff --git a/pkg/cloud/openstack/services/compute/instance.go b/pkg/cloud/openstack/services/compute/instance.go index 7b334d65f2..0bc8c505f2 100644 --- a/pkg/cloud/openstack/services/compute/instance.go +++ b/pkg/cloud/openstack/services/compute/instance.go @@ -22,6 +22,8 @@ import ( "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" "k8s.io/klog" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/networking" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" "time" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" @@ -41,7 +43,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" "github.com/gophercloud/gophercloud/pagination" - openstackconfigv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/util" ) @@ -53,8 +55,10 @@ const ( RetryIntervalPortDelete = 5 * time.Second ) +// TODO(sbueringer) We should probably wrape the OpenStack object completely (see CAPA) type Instance struct { servers.Server + State infrav1.InstanceState } type ServerNetwork struct { @@ -63,12 +67,12 @@ type ServerNetwork struct { } // InstanceCreate creates a compute instance -func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec *openstackconfigv1.OpenstackClusterProviderSpec, config *openstackconfigv1.OpenstackProviderSpec, userdata string, keyName string) (instance *Instance, err error) { +func (is *Service) InstanceCreate(clusterName string, name string, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, userdata string) (instance *Instance, err error) { var createOpts servers.CreateOptsBuilder - if config == nil { + if openStackMachine == nil { return nil, fmt.Errorf("create Options need be specified to create instace") } - if config.Trunk == true { + if openStackMachine.Spec.Trunk == true { trunkSupport, err := getTrunkSupport(is) if err != nil { return nil, fmt.Errorf("there was an issue verifying whether trunk support is available, please disable it: %v", err) @@ -85,19 +89,19 @@ func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec * } // Append machine specific tags - machineTags = append(machineTags, config.Tags...) + machineTags = append(machineTags, openStackMachine.Spec.Tags...) // Append cluster scope tags - machineTags = append(machineTags, clusterSpec.Tags...) + machineTags = append(machineTags, openStackCluster.Spec.Tags...) 
// Get security groups - securityGroups, err := getSecurityGroups(is, config.SecurityGroups) + securityGroups, err := getSecurityGroups(is, openStackMachine.Spec.SecurityGroups) if err != nil { return nil, err } // Get all network UUIDs var nets []ServerNetwork - for _, net := range config.Networks { + for _, net := range openStackMachine.Spec.Networks { opts := networks.ListOpts(net.Filter) opts.ID = net.UUID ids, err := getNetworkIDsByFilter(is.networkClient, &opts) @@ -165,7 +169,7 @@ func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec * Port: port.ID, }) - if config.Trunk == true { + if openStackMachine.Spec.Trunk == true { allPages, err := trunks.List(is.networkClient, trunks.ListOpts{ Name: name, PortID: port.ID, @@ -202,7 +206,7 @@ func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec * } var serverTags []string - if clusterSpec.DisableServerTags == false { + if openStackCluster.Spec.DisableServerTags == false { serverTags = machineTags // NOTE(flaper87): This is the minimum required version // to use tags. @@ -210,7 +214,7 @@ func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec * } // Get image ID - imageID, err := getImageID(is, config.Image) + imageID, err := getImageID(is, openStackMachine.Spec.Image) if err != nil { return nil, fmt.Errorf("create new server err: %v", err) } @@ -218,29 +222,29 @@ func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec * serverCreateOpts := servers.CreateOpts{ Name: name, ImageRef: imageID, - FlavorName: config.Flavor, - AvailabilityZone: config.AvailabilityZone, + FlavorName: openStackMachine.Spec.Flavor, + AvailabilityZone: openStackMachine.Spec.AvailabilityZone, Networks: portsList, UserData: []byte(userData), SecurityGroups: securityGroups, ServiceClient: is.computeClient, Tags: serverTags, - Metadata: config.ServerMetadata, - ConfigDrive: config.ConfigDrive, + Metadata: openStackMachine.Spec.ServerMetadata, + ConfigDrive: openStackMachine.Spec.ConfigDrive, } // If the root volume Size is not 0, means boot from volume - if config.RootVolume != nil && config.RootVolume.Size != 0 { + if openStackMachine.Spec.RootVolume != nil && openStackMachine.Spec.RootVolume.Size != 0 { var blocks []bootfromvolume.BlockDevice block := bootfromvolume.BlockDevice{ - SourceType: bootfromvolume.SourceType(config.RootVolume.SourceType), + SourceType: bootfromvolume.SourceType(openStackMachine.Spec.RootVolume.SourceType), BootIndex: 0, - UUID: config.RootVolume.SourceUUID, + UUID: openStackMachine.Spec.RootVolume.SourceUUID, DeleteOnTermination: true, DestinationType: bootfromvolume.DestinationVolume, - VolumeSize: config.RootVolume.Size, - DeviceType: config.RootVolume.DeviceType, + VolumeSize: openStackMachine.Spec.RootVolume.Size, + DeviceType: openStackMachine.Spec.RootVolume.DeviceType, } blocks = append(blocks, block) @@ -252,13 +256,13 @@ func (is *Service) InstanceCreate(clusterName string, name string, clusterSpec * server, err := servers.Create(is.computeClient, keypairs.CreateOptsExt{ CreateOptsBuilder: serverCreateOpts, - KeyName: keyName, + KeyName: openStackMachine.Spec.KeyName, }).Extract() if err != nil { return nil, fmt.Errorf("create new server err: %v", err) } is.computeClient.Microversion = "" - return &Instance{*server}, nil + return &Instance{Server: *server, State: infrav1.InstanceState(server.Status)}, nil } func getTrunkSupport(is *Service) (bool, error) { @@ -280,7 +284,7 @@ func getTrunkSupport(is *Service) (bool, error) { return false, 
nil } -func getSecurityGroups(is *Service, securityGroupParams []openstackconfigv1.SecurityGroupParam) ([]string, error) { +func getSecurityGroups(is *Service, securityGroupParams []infrav1.SecurityGroupParam) ([]string, error) { var sgIDs []string for _, sg := range securityGroupParams { listOpts := groups.ListOpts(sg.Filter) @@ -396,9 +400,15 @@ func (is *Service) AssociateFloatingIP(instanceID, floatingIP string) error { return floatingips.AssociateInstance(is.computeClient, instanceID, opts).ExtractErr() } -func (is *Service) InstanceDelete(id string) error { +func (is *Service) InstanceDelete(machine *v1alpha2.Machine) error { + + parsed, err := noderefutil.NewProviderID(*machine.Spec.ProviderID) + if err != nil { + return err + } + // get instance port id - allInterfaces, err := attachinterfaces.List(is.computeClient, id).AllPages() + allInterfaces, err := attachinterfaces.List(is.computeClient, parsed.ID()).AllPages() if err != nil { return err } @@ -407,7 +417,7 @@ func (is *Service) InstanceDelete(id string) error { return err } if len(instanceInterfaces) < 1 { - return servers.Delete(is.computeClient, id).ExtractErr() + return servers.Delete(is.computeClient, parsed.ID()).ExtractErr() } trunkSupport, err := getTrunkSupport(is) @@ -416,7 +426,7 @@ func (is *Service) InstanceDelete(id string) error { } // get and delete trunks for _, port := range instanceInterfaces { - err := attachinterfaces.Delete(is.computeClient, id, port.PortID).ExtractErr() + err := attachinterfaces.Delete(is.computeClient, parsed.ID(), port.PortID).ExtractErr() if err != nil { return err } @@ -460,7 +470,7 @@ func (is *Service) InstanceDelete(id string) error { } // delete instance - return servers.Delete(is.computeClient, id).ExtractErr() + return servers.Delete(is.computeClient, parsed.ID()).ExtractErr() } type InstanceListOpts struct { @@ -497,7 +507,10 @@ func (is *Service) GetInstanceList(opts *InstanceListOpts) ([]*Instance, error) } var instanceList []*Instance for _, server := range serverList { - instanceList = append(instanceList, &Instance{server}) + instanceList = append(instanceList, &Instance{ + Server: server, + State: infrav1.InstanceState(server.Status), + }) } return instanceList, nil } @@ -510,7 +523,24 @@ func (is *Service) GetInstance(resourceId string) (instance *Instance, err error if err != nil { return nil, fmt.Errorf("get server %q detail failed: %v", resourceId, err) } - return &Instance{*server}, err + return &Instance{Server: *server, State: infrav1.InstanceState(server.Status)}, err +} + +func (is *Service) InstanceExists(openStackMachine *infrav1.OpenStackMachine) (instance *Instance, err error) { + opts := &InstanceListOpts{ + Name: openStackMachine.Name, + Image: openStackMachine.Spec.Image, + Flavor: openStackMachine.Spec.Flavor, + } + + instanceList, err := is.GetInstanceList(opts) + if err != nil { + return nil, err + } + if len(instanceList) == 0 { + return nil, nil + } + return instanceList[0], nil } // UpdateToken to update token if need. 
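A minimal sketch of how the reworked compute service above might be driven from a machine reconciler: InstanceExists returns (nil, nil) when no server matches, and InstanceCreate now takes the OpenStackCluster and OpenStackMachine objects directly instead of provider specs. The helper name reconcileInstance, the package name, and the userData plumbing are illustrative assumptions, not code from this patch; the actual wiring lives in controllers/openstackmachine_controller.go.

package controllers

import (
	"k8s.io/klog"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
	"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/compute"
)

// reconcileInstance is a hypothetical helper: look up the server backing an
// OpenStackMachine and create it only when it does not exist yet.
func reconcileInstance(computeService *compute.Service, clusterName, userData string,
	openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine) (*compute.Instance, error) {

	// InstanceExists matches on the machine's name, image and flavor and
	// returns a nil instance (without error) when nothing is found.
	instance, err := computeService.InstanceExists(openStackMachine)
	if err != nil {
		return nil, err
	}
	if instance == nil {
		instance, err = computeService.InstanceCreate(clusterName, openStackMachine.Name, openStackCluster, openStackMachine, userData)
		if err != nil {
			return nil, err
		}
	}
	// The returned Instance carries the raw server status as an
	// infrav1.InstanceState in addition to the embedded servers.Server.
	klog.Infof("Instance %s is in state %q", instance.Name, instance.State)
	return instance, nil
}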
diff --git a/pkg/cloud/openstack/services/kubeadm/configs.go b/pkg/cloud/openstack/services/kubeadm/configs.go index 548b2c1800..c0231e8faa 100644 --- a/pkg/cloud/openstack/services/kubeadm/configs.go +++ b/pkg/cloud/openstack/services/kubeadm/configs.go @@ -18,7 +18,7 @@ package kubeadm import ( kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" ) // InitConfigurationOption will set various options on kubeadm's InitConfiguration type. @@ -111,7 +111,7 @@ func WithControllerManagerExtraVolumes(args []kubeadmv1beta1.HostPathMount) Clus } // WithClusterNetworkFromClusterNetworkingConfig maps a cluster api ClusterNetworkingConfig to a kubeadm Networking object. -func WithClusterNetworkFromClusterNetworkingConfig(networkingConfig v1alpha1.ClusterNetworkingConfig) ClusterConfigurationOption { +func WithClusterNetworkFromClusterNetworkingConfig(networkingConfig *clusterv1.ClusterNetworkingConfig) ClusterConfigurationOption { return func(c *kubeadmv1beta1.ClusterConfiguration) { c.Networking.DNSDomain = networkingConfig.ServiceDomain c.Networking.PodSubnet = networkingConfig.Pods.CIDRBlocks[0] diff --git a/pkg/cloud/openstack/services/kubeadm/configs_test.go b/pkg/cloud/openstack/services/kubeadm/configs_test.go index eb405f4bf6..be2df134e3 100644 --- a/pkg/cloud/openstack/services/kubeadm/configs_test.go +++ b/pkg/cloud/openstack/services/kubeadm/configs_test.go @@ -23,7 +23,7 @@ import ( kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/kubeadm" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" ) func TestInitConfiguration(t *testing.T) { @@ -95,7 +95,7 @@ func TestClusterConfiguration(t *testing.T) { kubeadm.WithKubernetesVersion("test version"), kubeadm.WithAPIServerExtraArgs(map[string]string{"test key": "test value"}), kubeadm.WithControllerManagerExtraArgs(map[string]string{"test cm key": "test cm value"}), - kubeadm.WithClusterNetworkFromClusterNetworkingConfig(clusterv1.ClusterNetworkingConfig{ + kubeadm.WithClusterNetworkFromClusterNetworkingConfig(&clusterv1.ClusterNetworkingConfig{ ServiceDomain: "test dns domain", Pods: clusterv1.NetworkRanges{ CIDRBlocks: []string{ diff --git a/pkg/cloud/openstack/services/loadbalancer/loadbalancer.go b/pkg/cloud/openstack/services/loadbalancer/loadbalancer.go index 6de6832d19..1af4a73f2e 100644 --- a/pkg/cloud/openstack/services/loadbalancer/loadbalancer.go +++ b/pkg/cloud/openstack/services/loadbalancer/loadbalancer.go @@ -11,24 +11,23 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - constants "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/contants" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/util" "time" ) -func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec, clusterProviderStatus *providerv1.OpenstackClusterProviderStatus) error { +func (s *Service) ReconcileLoadBalancer(clusterName string, openStackCluster 
*infrav1.OpenStackCluster) error { - if clusterProviderSpec.ExternalNetworkID == "" { + if openStackCluster.Spec.ExternalNetworkID == "" { klog.V(3).Infof("No need to create loadbalancer, due to missing ExternalNetworkID") return nil } - if clusterProviderSpec.APIServerLoadBalancerFloatingIP == "" { + if openStackCluster.Spec.APIServerLoadBalancerFloatingIP == "" { klog.V(3).Infof("No need to create loadbalancer, due to missing APIServerLoadBalancerFloatingIP") return nil } - if clusterProviderSpec.APIServerLoadBalancerPort == 0 { + if openStackCluster.Spec.APIServerLoadBalancerPort == 0 { klog.V(3).Infof("No need to create loadbalancer, due to missing APIServerLoadBalancerPort") return nil } @@ -45,7 +44,7 @@ func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec klog.Infof("Creating loadbalancer %s", loadBalancerName) lbCreateOpts := loadbalancers.CreateOpts{ Name: loadBalancerName, - VipSubnetID: clusterProviderStatus.Network.Subnet.ID, + VipSubnetID: openStackCluster.Status.Network.Subnet.ID, } lb, err = loadbalancers.Create(s.loadbalancerClient, lbCreateOpts).Extract() @@ -59,15 +58,15 @@ func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec } // floating ip - fp, err := checkIfFloatingIPExists(s.networkingClient, clusterProviderSpec.APIServerLoadBalancerFloatingIP) + fp, err := checkIfFloatingIPExists(s.networkingClient, openStackCluster.Spec.APIServerLoadBalancerFloatingIP) if err != nil { return err } if fp == nil { - klog.Infof("Creating floating ip %s", clusterProviderSpec.APIServerLoadBalancerFloatingIP) + klog.Infof("Creating floating ip %s", openStackCluster.Spec.APIServerLoadBalancerFloatingIP) fpCreateOpts := &floatingips.CreateOpts{ - FloatingIP: clusterProviderSpec.APIServerLoadBalancerFloatingIP, - FloatingNetworkID: clusterProviderSpec.ExternalNetworkID, + FloatingIP: openStackCluster.Spec.APIServerLoadBalancerFloatingIP, + FloatingNetworkID: openStackCluster.Spec.ExternalNetworkID, } fp, err = floatingips.Create(s.networkingClient, fpCreateOpts).Extract() if err != nil { @@ -76,7 +75,7 @@ func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec } // associate floating ip - klog.Infof("Associating floating ip %s", clusterProviderSpec.APIServerLoadBalancerFloatingIP) + klog.Infof("Associating floating ip %s", openStackCluster.Spec.APIServerLoadBalancerFloatingIP) fpUpdateOpts := &floatingips.UpdateOpts{ PortID: &lb.VipPortID, } @@ -90,8 +89,8 @@ func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec } // lb listener - portList := []int{clusterProviderSpec.APIServerLoadBalancerPort} - portList = append(portList, clusterProviderSpec.APIServerLoadBalancerAdditionalPorts...) + portList := []int{openStackCluster.Spec.APIServerLoadBalancerPort} + portList = append(portList, openStackCluster.Spec.APIServerLoadBalancerAdditionalPorts...) 
for _, port := range portList { lbPortObjectsName := fmt.Sprintf("%s-%d", loadBalancerName, port) @@ -170,7 +169,7 @@ func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec } } - clusterProviderStatus.Network.APIServerLoadBalancer = &providerv1.LoadBalancer{ + openStackCluster.Status.Network.APIServerLoadBalancer = &infrav1.LoadBalancer{ Name: lb.Name, ID: lb.ID, InternalIP: lb.VipAddress, @@ -179,43 +178,39 @@ func (s *Service) ReconcileLoadBalancer(clusterName string, clusterProviderSpec return nil } -func (s *Service) ReconcileLoadBalancerMember(clusterName string, machine *clusterv1.Machine, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec, clusterProviderStatus *providerv1.OpenstackClusterProviderStatus) error { - +func (s *Service) ReconcileLoadBalancerMember(clusterName string, machine *v1alpha2.Machine, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster, ip string) error { if !util.IsControlPlaneMachine(machine) { return nil } - if clusterProviderStatus.Network == nil { - return errors.New("network is not yet available in clusterProviderStatus") + if openStackCluster.Status.Network == nil { + return errors.New("network is not yet available in openStackCluster.Status") } - if clusterProviderStatus.Network.Subnet == nil { - return errors.New("network.Subnet is not yet available in clusterProviderStatus") + if openStackCluster.Status.Network.Subnet == nil { + return errors.New("network.Subnet is not yet available in openStackCluster.Status") } - if clusterProviderStatus.Network.APIServerLoadBalancer == nil { - return errors.New("network.APIServerLoadBalancer is not yet available in clusterProviderStatus") + if openStackCluster.Status.Network.APIServerLoadBalancer == nil { + return errors.New("network.APIServerLoadBalancer is not yet available in openStackCluster.Status") } loadBalancerName := fmt.Sprintf("%s-cluster-%s-%s", networkPrefix, clusterName, kubeapiLBSuffix) - klog.Infof("Reconciling loadbalancer %s for member %s", loadBalancerName, machine.Name) + klog.Infof("Reconciling loadbalancer %s for member %s", loadBalancerName, openStackMachine.Name) - lbID := clusterProviderStatus.Network.APIServerLoadBalancer.ID - subnetID := clusterProviderStatus.Network.Subnet.ID + lbID := openStackCluster.Status.Network.APIServerLoadBalancer.ID + subnetID := openStackCluster.Status.Network.Subnet.ID - portList := []int{clusterProviderSpec.APIServerLoadBalancerPort} - portList = append(portList, clusterProviderSpec.APIServerLoadBalancerAdditionalPorts...) + portList := []int{openStackCluster.Spec.APIServerLoadBalancerPort} + portList = append(portList, openStackCluster.Spec.APIServerLoadBalancerAdditionalPorts...) 
for _, port := range portList { lbPortObjectsName := fmt.Sprintf("%s-%d", loadBalancerName, port) - name := lbPortObjectsName + "-" + machine.Name + name := lbPortObjectsName + "-" + openStackMachine.Name pool, err := checkIfPoolExists(s.loadbalancerClient, lbPortObjectsName) if err != nil { return err } - - ip, ok := machine.ObjectMeta.Annotations[constants.OpenstackIPAnnotationKey] - if !ok { - klog.Infof("no ip found yet on annotation %s on machine %s", constants.OpenstackIPAnnotationKey, machine.Name) - return nil + if pool == nil { + return errors.New("loadbalancer pool does not exist yet") } lbMember, err := checkIfLbMemberExists(s.loadbalancerClient, pool.ID, name) @@ -273,7 +268,7 @@ func (s *Service) ReconcileLoadBalancerMember(clusterName string, machine *clust return nil } -func (s *Service) DeleteLoadBalancer(clusterName string, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec, clusterProviderStatus *providerv1.OpenstackClusterProviderStatus) error { +func (s *Service) DeleteLoadBalancer(clusterName string, openStackCluster *infrav1.OpenStackCluster) error { loadBalancerName := fmt.Sprintf("%s-cluster-%s-%s", networkPrefix, clusterName, kubeapiLBSuffix) lb, err := checkIfLbExists(s.loadbalancerClient, loadBalancerName) if err != nil { @@ -294,22 +289,22 @@ func (s *Service) DeleteLoadBalancer(clusterName string, clusterProviderSpec *pr return nil } -func (s *Service) DeleteLoadBalancerMember(clusterName string, machine *clusterv1.Machine, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec, clusterProviderStatus *providerv1.OpenstackClusterProviderStatus) error { +func (s *Service) DeleteLoadBalancerMember(clusterName string, machine *v1alpha2.Machine, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster) error { - if machine == nil || !util.IsControlPlaneMachine(machine) { + if openStackMachine == nil || !util.IsControlPlaneMachine(machine) { return nil } loadBalancerName := fmt.Sprintf("%s-cluster-%s-%s", networkPrefix, clusterName, kubeapiLBSuffix) klog.Infof("Reconciling loadbalancer %s", loadBalancerName) - lbID := clusterProviderStatus.Network.APIServerLoadBalancer.ID + lbID := openStackCluster.Status.Network.APIServerLoadBalancer.ID - portList := []int{clusterProviderSpec.APIServerLoadBalancerPort} - portList = append(portList, clusterProviderSpec.APIServerLoadBalancerAdditionalPorts...) + portList := []int{openStackCluster.Spec.APIServerLoadBalancerPort} + portList = append(portList, openStackCluster.Spec.APIServerLoadBalancerAdditionalPorts...) 
for _, port := range portList { lbPortObjectsName := fmt.Sprintf("%s-%d", loadBalancerName, port) - name := lbPortObjectsName + "-" + machine.Name + name := lbPortObjectsName + "-" + openStackMachine.Name pool, err := checkIfPoolExists(s.loadbalancerClient, lbPortObjectsName) if err != nil { diff --git a/pkg/cloud/openstack/services/networking/floatingip.go b/pkg/cloud/openstack/services/networking/floatingip.go index 757d478346..b3e11e1614 100644 --- a/pkg/cloud/openstack/services/networking/floatingip.go +++ b/pkg/cloud/openstack/services/networking/floatingip.go @@ -4,13 +4,11 @@ import ( "fmt" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - "time" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" ) -func (s *Service) GetOrCreateFloatingIP(clusterProviderSpec *providerv1.OpenstackClusterProviderSpec, ip string) error { +func (s *Service) GetOrCreateFloatingIP(openStackCluster *infrav1.OpenStackCluster, ip string) error { fp, err := checkIfFloatingIPExists(s.client, ip) if err != nil { return err @@ -19,7 +17,7 @@ func (s *Service) GetOrCreateFloatingIP(clusterProviderSpec *providerv1.Openstac klog.Infof("Creating floating ip %s", ip) fpCreateOpts := &floatingips.CreateOpts{ FloatingIP: ip, - FloatingNetworkID: clusterProviderSpec.ExternalNetworkID, + FloatingNetworkID: openStackCluster.Spec.ExternalNetworkID, } fp, err = floatingips.Create(s.client, fpCreateOpts).Extract() if err != nil { @@ -43,21 +41,3 @@ func checkIfFloatingIPExists(client *gophercloud.ServiceClient, ip string) (*flo } return &fpList[0], nil } - -var backoff = wait.Backoff{ - Steps: 10, - Duration: 30 * time.Second, - Factor: 1.0, - Jitter: 0.1, -} - -func waitForFloatingIP(client *gophercloud.ServiceClient, id, target string) error { - klog.Infof("Waiting for floatingip %s to become %s.", id, target) - return wait.ExponentialBackoff(backoff, func() (bool, error) { - fp, err := floatingips.Get(client, id).Extract() - if err != nil { - return false, err - } - return fp.Status == target, nil - }) -} diff --git a/pkg/cloud/openstack/services/networking/network.go b/pkg/cloud/openstack/services/networking/network.go index 20078725d1..d93326c568 100644 --- a/pkg/cloud/openstack/services/networking/network.go +++ b/pkg/cloud/openstack/services/networking/network.go @@ -24,7 +24,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" "github.com/pkg/errors" "k8s.io/klog" - openstackconfigv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" ) type createOpts struct { @@ -37,7 +37,7 @@ func (c createOpts) ToNetworkCreateMap() (map[string]interface{}, error) { return gophercloud.BuildRequestBody(c, "network") } -func (s *Service) ReconcileNetwork(clusterName string, clusterProviderSpec *openstackconfigv1.OpenstackClusterProviderSpec, clusterProviderStatus *openstackconfigv1.OpenstackClusterProviderStatus) error { +func (s *Service) ReconcileNetwork(clusterName string, openStackCluster *infrav1.OpenStackCluster) error { networkName := fmt.Sprintf("%s-cluster-%s", networkPrefix, clusterName) klog.Infof("Reconciling network %s", networkName) @@ -49,7 +49,7 @@ func (s *Service) ReconcileNetwork(clusterName string, clusterProviderSpec *open if res.ID != 
"" { // Network exists - clusterProviderStatus.Network = &openstackconfigv1.Network{ + openStackCluster.Status.Network = &infrav1.Network{ ID: res.ID, Name: res.Name, } @@ -57,7 +57,7 @@ func (s *Service) ReconcileNetwork(clusterName string, clusterProviderSpec *open } var portSecurityEnabled gophercloud.EnabledState - if clusterProviderSpec.DisablePortSecurity { + if openStackCluster.Spec.DisablePortSecurity { portSecurityEnabled = gophercloud.Disabled } else { portSecurityEnabled = gophercloud.Enabled @@ -81,16 +81,16 @@ func (s *Service) ReconcileNetwork(clusterName string, clusterProviderSpec *open return err } - clusterProviderStatus.Network = &openstackconfigv1.Network{ + openStackCluster.Status.Network = &infrav1.Network{ ID: network.ID, Name: network.Name, } return nil } -func (s *Service) ReconcileSubnet(clusterName string, clusterProviderSpec *openstackconfigv1.OpenstackClusterProviderSpec, clusterProviderStatus *openstackconfigv1.OpenstackClusterProviderStatus) error { +func (s *Service) ReconcileSubnet(clusterName string, openStackCluster *infrav1.OpenStackCluster) error { - if clusterProviderStatus.Network == nil || clusterProviderStatus.Network.ID == "" { + if openStackCluster.Status.Network == nil || openStackCluster.Status.Network.ID == "" { klog.V(4).Infof("No need to reconcile network components since no network exists.") return nil } @@ -99,8 +99,8 @@ func (s *Service) ReconcileSubnet(clusterName string, clusterProviderSpec *opens klog.Infof("Reconciling subnet %s", subnetName) allPages, err := subnets.List(s.client, subnets.ListOpts{ - NetworkID: clusterProviderStatus.Network.ID, - CIDR: clusterProviderSpec.NodeCIDR, + NetworkID: openStackCluster.Status.Network.ID, + CIDR: openStackCluster.Spec.NodeCIDR, }).AllPages() if err != nil { return err @@ -111,32 +111,32 @@ func (s *Service) ReconcileSubnet(clusterName string, clusterProviderSpec *opens return err } - var observedSubnet openstackconfigv1.Subnet + var observedSubnet infrav1.Subnet if len(subnetList) > 1 { // Not panicing here, because every other cluster might work. 
- return fmt.Errorf("found more than 1 network with the expected name (%d) and CIDR (%s), which should not be able to exist in OpenStack", len(subnetList), clusterProviderSpec.NodeCIDR) + return fmt.Errorf("found more than 1 network with the expected name (%d) and CIDR (%s), which should not be able to exist in OpenStack", len(subnetList), openStackCluster.Spec.NodeCIDR) } else if len(subnetList) == 0 { opts := subnets.CreateOpts{ - NetworkID: clusterProviderStatus.Network.ID, + NetworkID: openStackCluster.Status.Network.ID, Name: subnetName, IPVersion: 4, - CIDR: clusterProviderSpec.NodeCIDR, - DNSNameservers: clusterProviderSpec.DNSNameservers, + CIDR: openStackCluster.Spec.NodeCIDR, + DNSNameservers: openStackCluster.Spec.DNSNameservers, } newSubnet, err := subnets.Create(s.client, opts).Extract() if err != nil { return err } - observedSubnet = openstackconfigv1.Subnet{ + observedSubnet = infrav1.Subnet{ ID: newSubnet.ID, Name: newSubnet.Name, CIDR: newSubnet.CIDR, } } else if len(subnetList) == 1 { - observedSubnet = openstackconfigv1.Subnet{ + observedSubnet = infrav1.Subnet{ ID: subnetList[0].ID, Name: subnetList[0].Name, @@ -153,7 +153,7 @@ func (s *Service) ReconcileSubnet(clusterName string, clusterProviderSpec *opens return err } - clusterProviderStatus.Network.Subnet = &observedSubnet + openStackCluster.Status.Network.Subnet = &observedSubnet return nil } diff --git a/pkg/cloud/openstack/services/networking/router.go b/pkg/cloud/openstack/services/networking/router.go index a94b70bd69..32cbd84bcd 100644 --- a/pkg/cloud/openstack/services/networking/router.go +++ b/pkg/cloud/openstack/services/networking/router.go @@ -25,20 +25,20 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" "github.com/gophercloud/gophercloud/pagination" "k8s.io/klog" - openstackconfigv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" ) -func (s *Service) ReconcileRouter(clusterName string, clusterProviderSpec *openstackconfigv1.OpenstackClusterProviderSpec, clusterProviderStatus *openstackconfigv1.OpenstackClusterProviderStatus) error { +func (s *Service) ReconcileRouter(clusterName string, openStackCluster *infrav1.OpenStackCluster) error { - if clusterProviderStatus.Network == nil || clusterProviderStatus.Network.ID == "" { + if openStackCluster.Status.Network == nil || openStackCluster.Status.Network.ID == "" { klog.V(3).Infof("No need to reconcile router since no network exists.") return nil } - if clusterProviderStatus.Network.Subnet == nil || clusterProviderStatus.Network.Subnet.ID == "" { + if openStackCluster.Status.Network.Subnet == nil || openStackCluster.Status.Network.Subnet.ID == "" { klog.V(4).Infof("No need to reconcile router since no subnet exists.") return nil } - if clusterProviderSpec.ExternalNetworkID == "" { + if openStackCluster.Spec.ExternalNetworkID == "" { klog.V(3).Info("No need to create router, due to missing ExternalNetworkID.") return nil } @@ -66,9 +66,9 @@ func (s *Service) ReconcileRouter(clusterName string, clusterProviderSpec *opens // should be configured because at least in our environment // we can only set the routerIP via gateway update not during create // That's also the same way terraform provider OpenStack does it - if len(clusterProviderSpec.ExternalRouterIPs) == 0 { + if len(openStackCluster.Spec.ExternalRouterIPs) == 0 { opts.GatewayInfo = &routers.GatewayInfo{ - NetworkID: clusterProviderSpec.ExternalNetworkID, + 
NetworkID: openStackCluster.Spec.ExternalNetworkID, } } newRouter, err := routers.Create(s.client, opts).Extract() @@ -80,12 +80,12 @@ func (s *Service) ReconcileRouter(clusterName string, clusterProviderSpec *opens router = routerList[0] } - if len(clusterProviderSpec.ExternalRouterIPs) > 0 { + if len(openStackCluster.Spec.ExternalRouterIPs) > 0 { var updateOpts routers.UpdateOpts updateOpts.GatewayInfo = &routers.GatewayInfo{ - NetworkID: clusterProviderSpec.ExternalNetworkID, + NetworkID: openStackCluster.Spec.ExternalNetworkID, } - for _, externalRouterIP := range clusterProviderSpec.ExternalRouterIPs { + for _, externalRouterIP := range openStackCluster.Spec.ExternalRouterIPs { subnetID := externalRouterIP.Subnet.UUID if subnetID == "" { sopts := subnets.ListOpts(externalRouterIP.Subnet.Filter) @@ -110,7 +110,7 @@ func (s *Service) ReconcileRouter(clusterName string, clusterProviderSpec *opens } } - observedRouter := openstackconfigv1.Router{ + observedRouter := infrav1.Router{ Name: router.Name, ID: router.ID, } @@ -125,7 +125,7 @@ func (s *Service) ReconcileRouter(clusterName string, clusterProviderSpec *opens INTERFACE_LOOP: for _, iface := range routerInterfaces { for _, ip := range iface.FixedIPs { - if ip.SubnetID == clusterProviderStatus.Network.Subnet.ID { + if ip.SubnetID == openStackCluster.Status.Network.Subnet.ID { createInterface = false break INTERFACE_LOOP } @@ -134,9 +134,9 @@ INTERFACE_LOOP: // ... and create a router interface for our subnet. if createInterface { - klog.V(4).Infof("Creating RouterInterface on %s in subnet %s", router.ID, clusterProviderStatus.Network.Subnet.ID) + klog.V(4).Infof("Creating RouterInterface on %s in subnet %s", router.ID, openStackCluster.Status.Network.Subnet.ID) iface, err := routers.AddInterface(s.client, router.ID, routers.AddInterfaceOpts{ - SubnetID: clusterProviderStatus.Network.Subnet.ID, + SubnetID: openStackCluster.Status.Network.Subnet.ID, }).Extract() if err != nil { return fmt.Errorf("unable to create router interface: %v", err) @@ -154,7 +154,7 @@ INTERFACE_LOOP: } if observedRouter.ID != "" { - clusterProviderStatus.Network.Router = &observedRouter + openStackCluster.Status.Network.Router = &observedRouter } return nil } diff --git a/pkg/cloud/openstack/services/networking/securitygroups.go b/pkg/cloud/openstack/services/networking/securitygroups.go index 4aa245d8c8..badcd48d6b 100644 --- a/pkg/cloud/openstack/services/networking/securitygroups.go +++ b/pkg/cloud/openstack/services/networking/securitygroups.go @@ -22,8 +22,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" - - openstackconfigv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" ) const ( @@ -32,7 +31,7 @@ const ( globalSuffix string = "all" ) -var defaultRules = []openstackconfigv1.SecurityGroupRule{ +var defaultRules = []infrav1.SecurityGroupRule{ { Direction: "egress", EtherType: "IPv4", @@ -52,17 +51,17 @@ var defaultRules = []openstackconfigv1.SecurityGroupRule{ } // Reconcile the security groups. 
-func (s *Service) ReconcileSecurityGroups(clusterName string, desired openstackconfigv1.OpenstackClusterProviderSpec, status *openstackconfigv1.OpenstackClusterProviderStatus) error { +func (s *Service) ReconcileSecurityGroups(clusterName string, openStackCluster *infrav1.OpenStackCluster) error { klog.Infof("Reconciling security groups for cluster %s", clusterName) - if !desired.ManagedSecurityGroups { + if !openStackCluster.Spec.ManagedSecurityGroups { klog.V(4).Infof("No need to reconcile security groups for cluster %s", clusterName) return nil } - desiredSecGroups := map[string]openstackconfigv1.SecurityGroup{ + desiredSecGroups := map[string]infrav1.SecurityGroup{ "controlplane": generateControlPlaneGroup(clusterName), "global": generateGlobalGroup(clusterName), } - observedSecGroups := make(map[string]*openstackconfigv1.SecurityGroup) + observedSecGroups := make(map[string]*infrav1.SecurityGroup) for k, desiredSecGroup := range desiredSecGroups { klog.Infof("Reconciling security group %s", desiredSecGroup.Name) @@ -92,13 +91,13 @@ func (s *Service) ReconcileSecurityGroups(clusterName string, desired openstackc observedSecGroups[k], err = s.createSecGroup(desiredSecGroup) } - status.ControlPlaneSecurityGroup = observedSecGroups["controlplane"] - status.GlobalSecurityGroup = observedSecGroups["global"] + openStackCluster.Status.ControlPlaneSecurityGroup = observedSecGroups["controlplane"] + openStackCluster.Status.GlobalSecurityGroup = observedSecGroups["global"] return nil } -func (s *Service) DeleteSecurityGroups(group *openstackconfigv1.SecurityGroup) error { +func (s *Service) DeleteSecurityGroups(group *infrav1.SecurityGroup) error { exists, err := s.exists(group.ID) if err != nil { return err @@ -127,15 +126,15 @@ func (s *Service) exists(groupID string) (bool, error) { return true, nil } -func generateControlPlaneGroup(clusterName string) openstackconfigv1.SecurityGroup { +func generateControlPlaneGroup(clusterName string) infrav1.SecurityGroup { secGroupName := fmt.Sprintf("%s-cluster-%s-secgroup-%s", secGroupPrefix, clusterName, controlPlaneSuffix) // Hardcoded rules for now, we might want to make this definable in the Spec but it's more // likely that the infrastructure plan in cluster-api will have taken form by then. - return openstackconfigv1.SecurityGroup{ + return infrav1.SecurityGroup{ Name: secGroupName, Rules: append( - []openstackconfigv1.SecurityGroupRule{ + []infrav1.SecurityGroupRule{ { Direction: "ingress", EtherType: "IPv4", @@ -158,14 +157,14 @@ func generateControlPlaneGroup(clusterName string) openstackconfigv1.SecurityGro } } -func generateGlobalGroup(clusterName string) openstackconfigv1.SecurityGroup { +func generateGlobalGroup(clusterName string) infrav1.SecurityGroup { secGroupName := fmt.Sprintf("%s-cluster-%s-secgroup-%s", secGroupPrefix, clusterName, globalSuffix) // As above, hardcoded rules. - return openstackconfigv1.SecurityGroup{ + return infrav1.SecurityGroup{ Name: secGroupName, Rules: append( - []openstackconfigv1.SecurityGroupRule{ + []infrav1.SecurityGroupRule{ { Direction: "ingress", EtherType: "IPv4", @@ -197,7 +196,7 @@ func generateGlobalGroup(clusterName string) openstackconfigv1.SecurityGroup { } // matchGroups will check if security groups match. -func matchGroups(desired, observed *openstackconfigv1.SecurityGroup) bool { +func matchGroups(desired, observed *infrav1.SecurityGroup) bool { // If they have differing amount of rules they obviously don't match. 
if len(desired.Rules) != len(observed.Rules) { return false @@ -226,16 +225,16 @@ func matchGroups(desired, observed *openstackconfigv1.SecurityGroup) bool { // reconcileGroup reconciles an already existing observed group by essentially emptying out all the rules and // recreating them. -func (s *Service) reconcileGroup(desired, observed *openstackconfigv1.SecurityGroup) (*openstackconfigv1.SecurityGroup, error) { +func (s *Service) reconcileGroup(desired, observed *infrav1.SecurityGroup) (*infrav1.SecurityGroup, error) { klog.V(6).Infof("Deleting all rules for group %s", observed.Name) for _, rule := range observed.Rules { klog.V(6).Infof("Deleting rule %s from group %s", rule.ID, observed.Name) err := rules.Delete(s.client, rule.ID).ExtractErr() if err != nil { - return &openstackconfigv1.SecurityGroup{}, err + return &infrav1.SecurityGroup{}, err } } - recreatedRules := make([]openstackconfigv1.SecurityGroupRule, 0, len(desired.Rules)) + recreatedRules := make([]infrav1.SecurityGroupRule, 0, len(desired.Rules)) klog.V(6).Infof("Recreating all rules for group %s", observed.Name) for _, rule := range desired.Rules { r := rule @@ -245,7 +244,7 @@ func (s *Service) reconcileGroup(desired, observed *openstackconfigv1.SecurityGr } newRule, err := s.createRule(r) if err != nil { - return &openstackconfigv1.SecurityGroup{}, err + return &infrav1.SecurityGroup{}, err } recreatedRules = append(recreatedRules, newRule) } @@ -253,7 +252,7 @@ func (s *Service) reconcileGroup(desired, observed *openstackconfigv1.SecurityGr return observed, nil } -func (s *Service) createSecGroup(group openstackconfigv1.SecurityGroup) (*openstackconfigv1.SecurityGroup, error) { +func (s *Service) createSecGroup(group infrav1.SecurityGroup) (*infrav1.SecurityGroup, error) { createOpts := groups.CreateOpts{ Name: group.Name, Description: "Cluster API managed group", @@ -261,11 +260,11 @@ func (s *Service) createSecGroup(group openstackconfigv1.SecurityGroup) (*openst klog.V(6).Infof("Creating group %+v", createOpts) g, err := groups.Create(s.client, createOpts).Extract() if err != nil { - return &openstackconfigv1.SecurityGroup{}, err + return &infrav1.SecurityGroup{}, err } newGroup := convertOSSecGroupToConfigSecGroup(*g) - securityGroupRules := make([]openstackconfigv1.SecurityGroupRule, 0, len(group.Rules)) + securityGroupRules := make([]infrav1.SecurityGroupRule, 0, len(group.Rules)) klog.V(6).Infof("Creating rules for group %s", group.Name) for _, rule := range group.Rules { r := rule @@ -275,7 +274,7 @@ func (s *Service) createSecGroup(group openstackconfigv1.SecurityGroup) (*openst } newRule, err := s.createRule(r) if err != nil { - return &openstackconfigv1.SecurityGroup{}, err + return &infrav1.SecurityGroup{}, err } securityGroupRules = append(securityGroupRules, newRule) } @@ -284,7 +283,7 @@ func (s *Service) createSecGroup(group openstackconfigv1.SecurityGroup) (*openst return newGroup, nil } -func (s *Service) getSecurityGroupByName(name string) (*openstackconfigv1.SecurityGroup, error) { +func (s *Service) getSecurityGroupByName(name string) (*infrav1.SecurityGroup, error) { opts := groups.ListOpts{ Name: name, } @@ -292,25 +291,25 @@ func (s *Service) getSecurityGroupByName(name string) (*openstackconfigv1.Securi klog.V(6).Infof("Attempting to fetch security group with name %s", name) allPages, err := groups.List(s.client, opts).AllPages() if err != nil { - return &openstackconfigv1.SecurityGroup{}, err + return &infrav1.SecurityGroup{}, err } allGroups, err := groups.ExtractGroups(allPages) if err != 
nil { - return &openstackconfigv1.SecurityGroup{}, err + return &infrav1.SecurityGroup{}, err } switch len(allGroups) { case 0: - return &openstackconfigv1.SecurityGroup{}, nil + return &infrav1.SecurityGroup{}, nil case 1: return convertOSSecGroupToConfigSecGroup(allGroups[0]), nil } - return &openstackconfigv1.SecurityGroup{}, fmt.Errorf("More than one security group found named: %s", name) + return &infrav1.SecurityGroup{}, fmt.Errorf("more than one security group found named: %s", name) } -func (s *Service) createRule(r openstackconfigv1.SecurityGroupRule) (openstackconfigv1.SecurityGroupRule, error) { +func (s *Service) createRule(r infrav1.SecurityGroupRule) (infrav1.SecurityGroupRule, error) { dir := rules.RuleDirection(r.Direction) proto := rules.RuleProtocol(r.Protocol) etherType := rules.RuleEtherType(r.EtherType) @@ -328,25 +327,25 @@ func (s *Service) createRule(r openstackconfigv1.SecurityGroupRule) (openstackco klog.V(6).Infof("Creating rule %+v", createOpts) rule, err := rules.Create(s.client, createOpts).Extract() if err != nil { - return openstackconfigv1.SecurityGroupRule{}, err + return infrav1.SecurityGroupRule{}, err } return convertOSSecGroupRuleToConfigSecGroupRule(*rule), nil } -func convertOSSecGroupToConfigSecGroup(osSecGroup groups.SecGroup) *openstackconfigv1.SecurityGroup { - securityGroupRules := make([]openstackconfigv1.SecurityGroupRule, len(osSecGroup.Rules)) +func convertOSSecGroupToConfigSecGroup(osSecGroup groups.SecGroup) *infrav1.SecurityGroup { + securityGroupRules := make([]infrav1.SecurityGroupRule, len(osSecGroup.Rules)) for i, rule := range osSecGroup.Rules { securityGroupRules[i] = convertOSSecGroupRuleToConfigSecGroupRule(rule) } - return &openstackconfigv1.SecurityGroup{ + return &infrav1.SecurityGroup{ ID: osSecGroup.ID, Name: osSecGroup.Name, Rules: securityGroupRules, } } -func convertOSSecGroupRuleToConfigSecGroupRule(osSecGroupRule rules.SecGroupRule) openstackconfigv1.SecurityGroupRule { - return openstackconfigv1.SecurityGroupRule{ +func convertOSSecGroupRuleToConfigSecGroupRule(osSecGroupRule rules.SecGroupRule) infrav1.SecurityGroupRule { + return infrav1.SecurityGroupRule{ ID: osSecGroupRule.ID, Direction: osSecGroupRule.Direction, EtherType: osSecGroupRule.EtherType, diff --git a/pkg/cloud/openstack/services/provider/provider.go b/pkg/cloud/openstack/services/provider/provider.go index b1f8eddd15..799bb6357c 100644 --- a/pkg/cloud/openstack/services/provider/provider.go +++ b/pkg/cloud/openstack/services/provider/provider.go @@ -1,19 +1,19 @@ package provider import ( + "context" "crypto/tls" "crypto/x509" "fmt" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/utils/openstack/clientconfig" - "github.com/pkg/errors" "gopkg.in/yaml.v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "net/http" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -21,20 +21,17 @@ const ( CaSecretKey = "cacert" ) -func NewClientFromMachine(kubeClient kubernetes.Interface, machine *clusterv1.Machine) (*gophercloud.ProviderClient, *clientconfig.ClientOpts, error) { - machineProviderSpec, err := providerv1.MachineSpecFromProviderSpec(machine.Spec.ProviderSpec) - if err != nil 
{ - return nil, nil, errors.Errorf("failed to load machine provider spec: %v", err) - } +func NewClientFromMachine(ctrlClient client.Client, openStackMachine *infrav1.OpenStackMachine) (*gophercloud.ProviderClient, *clientconfig.ClientOpts, error) { var cloud clientconfig.Cloud var caCert []byte - if machineProviderSpec.CloudsSecret != nil && machineProviderSpec.CloudsSecret.Name != "" { - namespace := machineProviderSpec.CloudsSecret.Namespace + if openStackMachine.Spec.CloudsSecret != nil && openStackMachine.Spec.CloudsSecret.Name != "" { + namespace := openStackMachine.Spec.CloudsSecret.Namespace if namespace == "" { - namespace = machine.Namespace + namespace = openStackMachine.Namespace } - cloud, caCert, err = getCloudFromSecret(kubeClient, namespace, machineProviderSpec.CloudsSecret.Name, machineProviderSpec.CloudName) + var err error + cloud, caCert, err = getCloudFromSecret(ctrlClient, namespace, openStackMachine.Spec.CloudsSecret.Name, openStackMachine.Spec.CloudName) if err != nil { return nil, nil, err } @@ -42,20 +39,17 @@ func NewClientFromMachine(kubeClient kubernetes.Interface, machine *clusterv1.Ma return newClient(cloud, caCert) } -func NewClientFromCluster(kubeClient kubernetes.Interface, cluster *clusterv1.Cluster) (*gophercloud.ProviderClient, *clientconfig.ClientOpts, error) { - clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) - if err != nil { - return nil, nil, errors.Errorf("failed to load cluster provider spec: %v", err) - } +func NewClientFromCluster(ctrlClient client.Client, openStackCluster *infrav1.OpenStackCluster) (*gophercloud.ProviderClient, *clientconfig.ClientOpts, error) { var cloud clientconfig.Cloud var caCert []byte - if clusterProviderSpec.CloudsSecret != nil && clusterProviderSpec.CloudsSecret.Name != "" { - namespace := clusterProviderSpec.CloudsSecret.Namespace + if openStackCluster.Spec.CloudsSecret != nil && openStackCluster.Spec.CloudsSecret.Name != "" { + namespace := openStackCluster.Spec.CloudsSecret.Namespace if namespace == "" { - namespace = cluster.Namespace + namespace = openStackCluster.Namespace } - cloud, caCert, err = getCloudFromSecret(kubeClient, namespace, clusterProviderSpec.CloudsSecret.Name, clusterProviderSpec.CloudName) + var err error + cloud, caCert, err = getCloudFromSecret(ctrlClient, namespace, openStackCluster.Spec.CloudsSecret.Name, openStackCluster.Spec.CloudName) if err != nil { return nil, nil, err } @@ -105,7 +99,8 @@ func newClient(cloud clientconfig.Cloud, caCert []byte) (*gophercloud.ProviderCl } // getCloudFromSecret extract a Cloud from the given namespace:secretName -func getCloudFromSecret(kubeClient kubernetes.Interface, namespace string, secretName string, cloudName string) (clientconfig.Cloud, []byte, error) { +func getCloudFromSecret(ctrlClient client.Client, secretNamespace string, secretName string, cloudName string) (clientconfig.Cloud, []byte, error) { + ctx := context.TODO() emptyCloud := clientconfig.Cloud{} if secretName == "" { @@ -116,7 +111,11 @@ func getCloudFromSecret(kubeClient kubernetes.Interface, namespace string, secre return emptyCloud, nil, fmt.Errorf("secret name set to %v but no cloud was specified. 
Please set cloud_name in your machine spec", secretName) } - secret, err := kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}) + secret := &v1.Secret{} + err := ctrlClient.Get(ctx, types.NamespacedName{ + Namespace: secretNamespace, + Name: secretName, + }, secret) if err != nil { return emptyCloud, nil, err } diff --git a/pkg/cloud/openstack/services/userdata/kubeadm.go b/pkg/cloud/openstack/services/userdata/kubeadm.go index 85b940d666..354ae818b7 100644 --- a/pkg/cloud/openstack/services/userdata/kubeadm.go +++ b/pkg/cloud/openstack/services/userdata/kubeadm.go @@ -4,10 +4,10 @@ import ( "fmt" "k8s.io/klog" kubeadmv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/kubeadm" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" ) const ( @@ -19,9 +19,9 @@ const ( nodeRole = "node-role.kubernetes.io/node=" ) -func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster *clusterv1.Cluster, machine *clusterv1.Machine, machineProviderSpec *providerv1.OpenstackProviderSpec, clusterProviderSpec *providerv1.OpenstackClusterProviderSpec) (string, error) { +func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (string, error) { - caCertHash, err := certificates.GenerateCertificateHash(clusterProviderSpec.CAKeyPair.Cert) + caCertHash, err := certificates.GenerateCertificateHash(openStackCluster.Spec.CAKeyPair.Cert) if err != nil { return "", err } @@ -34,16 +34,19 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * if bootstrapToken == "" { klog.Info("Machine is the first control plane machine for the cluster") - if !clusterProviderSpec.CAKeyPair.HasCertAndKey() { + if !openStackCluster.Spec.CAKeyPair.HasCertAndKey() { return "", fmt.Errorf("failed to create controlplane kubeadm config, missing CAKeyPair") } - clusterConfigurationCopy := clusterProviderSpec.ClusterConfiguration.DeepCopy() + clusterConfigurationCopy := openStackCluster.Spec.ClusterConfiguration.DeepCopy() // Set default values. 
If they are not set, these two properties are added with an // empty string and the CoreOS ignition postprocesser fails with "could not find expected key" if clusterConfigurationCopy.CertificatesDir == "" { clusterConfigurationCopy.CertificatesDir = kubeadmv1beta1.DefaultCertificatesDir } + if clusterConfigurationCopy.ImageRepository == "" { + clusterConfigurationCopy.ImageRepository = kubeadmv1beta1.DefaultImageRepository + } if string(clusterConfigurationCopy.DNS.Type) == "" { clusterConfigurationCopy.DNS.Type = kubeadmv1beta1.CoreDNS } @@ -52,7 +55,7 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * } kubeadm.SetClusterConfigurationOptions( clusterConfigurationCopy, - kubeadm.WithKubernetesVersion(machine.Spec.Versions.ControlPlane), + kubeadm.WithKubernetesVersion(*machine.Spec.Version), kubeadm.WithAPIServerCertificateSANs(localIPV4Lookup), kubeadm.WithAPIServerExtraArgs(map[string]string{"cloud-provider": cloudProvider}), kubeadm.WithAPIServerExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), @@ -77,7 +80,7 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * }, }), kubeadm.WithClusterNetworkFromClusterNetworkingConfig(cluster.Spec.ClusterNetwork), - kubeadm.WithKubernetesVersion(machine.Spec.Versions.ControlPlane), + kubeadm.WithKubernetesVersion(*machine.Spec.Version), ) clusterConfigYAML, err := kubeadm.ConfigurationToYAML(clusterConfigurationCopy) if err != nil { @@ -85,17 +88,16 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * } kubeadm.SetInitConfigurationOptions( - &machineProviderSpec.KubeadmConfiguration.Init, + &openStackMachine.Spec.KubeadmConfiguration.Init, kubeadm.WithNodeRegistrationOptions( kubeadm.NewNodeRegistration( - kubeadm.WithTaints(machine.Spec.Taints), kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-provider": cloudProvider}), kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), ), ), kubeadm.WithInitLocalAPIEndpointAndPort(localIPV4Lookup, cluster.Status.APIEndpoints[0].Port), ) - initConfigYAML, err := kubeadm.ConfigurationToYAML(&machineProviderSpec.KubeadmConfiguration.Init) + initConfigYAML, err := kubeadm.ConfigurationToYAML(&openStackMachine.Spec.KubeadmConfiguration.Init) if err != nil { return "", err } @@ -104,7 +106,7 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * } else { klog.Info("Allowing a machine to join the control plane") - joinConfigurationCopy := machineProviderSpec.KubeadmConfiguration.Join + joinConfigurationCopy := openStackMachine.Spec.KubeadmConfiguration.Join // Set default values. 
If they are not set, these two properties are added with an // empty string and the CoreOS ignition postprocesser fails with "could not find expected key" joinConfigurationCopy.CACertPath = kubeadmv1beta1.DefaultCACertPath @@ -112,7 +114,7 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * &joinConfigurationCopy, kubeadm.WithBootstrapTokenDiscovery( kubeadm.NewBootstrapTokenDiscovery( - kubeadm.WithAPIServerEndpoint(clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint), + kubeadm.WithAPIServerEndpoint(openStackCluster.Spec.ClusterConfiguration.ControlPlaneEndpoint), kubeadm.WithToken(bootstrapToken), kubeadm.WithCACertificateHash(caCertHash), ), @@ -120,7 +122,6 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * kubeadm.WithTLSBootstrapToken(bootstrapToken), kubeadm.WithJoinNodeRegistrationOptions( kubeadm.NewNodeRegistration( - kubeadm.WithTaints(machine.Spec.Taints), kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-provider": cloudProvider}), kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), ), @@ -138,7 +139,7 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * } else { klog.Info("Joining a worker node to the cluster") - joinConfigurationCopy := machineProviderSpec.KubeadmConfiguration.Join + joinConfigurationCopy := openStackMachine.Spec.KubeadmConfiguration.Join // Set default values. If they are not set, these two properties are added with an // empty string and the CoreOS ignition postprocesser fails with "could not find expected key" joinConfigurationCopy.CACertPath = kubeadmv1beta1.DefaultCACertPath @@ -146,7 +147,7 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * &joinConfigurationCopy, kubeadm.WithBootstrapTokenDiscovery( kubeadm.NewBootstrapTokenDiscovery( - kubeadm.WithAPIServerEndpoint(clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint), + kubeadm.WithAPIServerEndpoint(openStackCluster.Spec.ClusterConfiguration.ControlPlaneEndpoint), kubeadm.WithToken(bootstrapToken), kubeadm.WithCACertificateHash(caCertHash), ), @@ -154,7 +155,6 @@ func generateKubeadmConfig(isControlPlane bool, bootstrapToken string, cluster * kubeadm.WithTLSBootstrapToken(bootstrapToken), kubeadm.WithJoinNodeRegistrationOptions( kubeadm.NewNodeRegistration( - kubeadm.WithTaints(machine.Spec.Taints), kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-provider": cloudProvider}), kubeadm.WithKubeletExtraArgs(map[string]string{"cloud-config": cloudProviderConfig}), kubeadm.WithKubeletExtraArgs(map[string]string{"node-labels": nodeRole}), diff --git a/pkg/cloud/openstack/services/userdata/machinescript.go b/pkg/cloud/openstack/services/userdata/machinescript.go index 6e49e5321e..e2e0eaac02 100644 --- a/pkg/cloud/openstack/services/userdata/machinescript.go +++ b/pkg/cloud/openstack/services/userdata/machinescript.go @@ -20,19 +20,19 @@ import ( "bytes" "context" "errors" + werrors "github.com/pkg/errors" "io/ioutil" - "k8s.io/client-go/kubernetes" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" "path" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/options" + "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/compute" 
"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/provider" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/deployer" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "strings" "text/template" "time" @@ -41,12 +41,11 @@ import ( "encoding/json" clconfig "github.com/coreos/container-linux-config-transpiler/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tokenapi "k8s.io/cluster-bootstrap/token/api" tokenutil "k8s.io/cluster-bootstrap/token/util" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2" "sigs.k8s.io/cluster-api-provider-openstack/pkg/bootstrap" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" apierrors "sigs.k8s.io/cluster-api/pkg/errors" ) @@ -71,8 +70,8 @@ type setupParams struct { KubeadmConfig string } -func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface, machineProviderSpec *providerv1.OpenstackProviderSpec, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { - +func GetUserData(ctrlClient client.Client, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (string, error) { + ctx := context.TODO() // get machine startup script var ok bool var disableTemplating bool @@ -81,24 +80,28 @@ func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface var err error var userData []byte - if machineProviderSpec.UserDataSecret != nil { - namespace := machineProviderSpec.UserDataSecret.Namespace + if openStackMachine.Spec.UserDataSecret != nil { + namespace := openStackMachine.Spec.UserDataSecret.Namespace if namespace == "" { namespace = machine.Namespace } - if machineProviderSpec.UserDataSecret.Name == "" { + if openStackMachine.Spec.UserDataSecret.Name == "" { return "", fmt.Errorf("UserDataSecret name must be provided") } - userDataSecret, err := kubeClient.CoreV1().Secrets(namespace).Get(machineProviderSpec.UserDataSecret.Name, metav1.GetOptions{}) + userDataSecret := &v1.Secret{} + err := ctrlClient.Get(ctx, types.NamespacedName{ + Namespace: namespace, + Name: openStackMachine.Spec.UserDataSecret.Name, + }, userDataSecret) if err != nil { return "", err } userData, ok = userDataSecret.Data[UserDataKey] if !ok { - return "", fmt.Errorf("machine's userdata secret %v in namespace %v did not contain key %v", machineProviderSpec.UserDataSecret.Name, namespace, UserDataKey) + return "", fmt.Errorf("machine's userdata secret %v in namespace %v did not contain key %v", openStackMachine.Spec.UserDataSecret.Name, namespace, UserDataKey) } _, disableTemplating = userDataSecret.Data[DisableTemplatingKey] @@ -120,7 +123,7 @@ func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface var userDataRendered string if len(userData) > 0 && !disableTemplating { - isNodeJoin, err := isNodeJoin(controllerClient, kubeClient, cluster, machine) + isNodeJoin, err := isNodeJoin(ctrlClient, openStackCluster, machine) if err != nil { return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) } @@ -128,13 +131,13 @@ func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface var bootstrapToken string if isNodeJoin { klog.Info("Creating bootstrap token") - bootstrapToken, err = 
createBootstrapToken(controllerClient) + bootstrapToken, err = createBootstrapToken(openStackCluster) if err != nil { return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) } } - userDataRendered, err = startupScript(cluster, machine, machineProviderSpec, string(userData), bootstrapToken) + userDataRendered, err = startupScript(machine, openStackMachine, cluster, openStackCluster, string(userData), bootstrapToken) if err != nil { return "", apierrors.CreateMachine("error creating Openstack instance: %v", err) } @@ -154,29 +157,29 @@ func GetUserData(controllerClient client.Client, kubeClient kubernetes.Interface case "ct": clCfg, ast, report := clconfig.Parse([]byte(userDataRendered)) if len(report.Entries) > 0 { - return "", fmt.Errorf("Postprocessor error: %s", report.String()) + return "", fmt.Errorf("postprocessor error: %s", report.String()) } ignCfg, report := clconfig.Convert(clCfg, "openstack-metadata", ast) if len(report.Entries) > 0 { - return "", fmt.Errorf("Postprocessor error: %s", report.String()) + return "", fmt.Errorf("postprocessor error: %s", report.String()) } ud, err := json.Marshal(&ignCfg) if err != nil { - return "", fmt.Errorf("Postprocessor error: %s", err) + return "", fmt.Errorf("postprocessor error: %s", err) } userDataRendered = string(ud) default: - return "", fmt.Errorf("Postprocessor error: unknown postprocessor: '%s'", postprocessor) + return "", fmt.Errorf("postprocessor error: unknown postprocessor: '%s'", postprocessor) } } return userDataRendered, nil } -func isNodeJoin(controllerClient client.Client, kubeClient kubernetes.Interface, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { +func isNodeJoin(ctrlClient client.Client, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine) (bool, error) { // Worker machines always join if !util.IsControlPlaneMachine(machine) { @@ -185,7 +188,7 @@ func isNodeJoin(controllerClient client.Client, kubeClient kubernetes.Interface, } // Get control plane machines and return false if none found - controlPlaneMachines, err := getControlPlaneMachines(controllerClient) + controlPlaneMachines, err := getControlPlaneMachines(ctrlClient) if err != nil { return false, apierrors.CreateMachine("error retrieving control plane machines: %v", err) } @@ -195,7 +198,7 @@ func isNodeJoin(controllerClient client.Client, kubeClient kubernetes.Interface, } // Get control plane machine instances and return false if none found - osProviderClient, clientOpts, err := provider.NewClientFromCluster(kubeClient, cluster) + osProviderClient, clientOpts, err := provider.NewClientFromCluster(ctrlClient, openStackCluster) if err != nil { return false, err } @@ -224,11 +227,10 @@ func isNodeJoin(controllerClient client.Client, kubeClient kubernetes.Interface, return false, nil } -func getControlPlaneMachines(controllerClient client.Client) ([]*clusterv1.Machine, error) { +func getControlPlaneMachines(ctrlClient client.Client) ([]*clusterv1.Machine, error) { var controlPlaneMachines []*clusterv1.Machine msList := &clusterv1.MachineList{} - listOptions := &client.ListOptions{} - err := controllerClient.List(context.TODO(), listOptions, msList) + err := ctrlClient.List(context.TODO(), msList) if err != nil { return nil, fmt.Errorf("error retrieving machines: %v", err) } @@ -242,7 +244,7 @@ func getControlPlaneMachines(controllerClient client.Client) ([]*clusterv1.Machi return controlPlaneMachines, nil } -func createBootstrapToken(controllerClient client.Client) (string, error) { +func 
createBootstrapToken(openStackCluster *infrav1.OpenStackCluster) (string, error) { token, err := tokenutil.GenerateBootstrapToken() if err != nil { return "", err @@ -254,7 +256,7 @@ func createBootstrapToken(controllerClient client.Client) (string, error) { panic(fmt.Sprintf("unable to create token. there might be a bug somwhere: %v", err)) } - kubeClient, err := getWorkloadClusterKubeClient(controllerClient) + kubeClient, err := getKubeClient(openStackCluster) if err != nil { return "", err } @@ -270,34 +272,8 @@ func createBootstrapToken(controllerClient client.Client) (string, error) { ), nil } -func getWorkloadClusterKubeClient(controllerClient client.Client) (client.Client, error) { - var cluster *clusterv1.Cluster - - controlPlaneMachines, err := getControlPlaneMachines(controllerClient) - if err != nil { - return nil, err - } - - if len(controlPlaneMachines) == 0 { - return nil, fmt.Errorf("could not find control plane nodes") - } - - clusterList := &clusterv1.ClusterList{} - listOptions := &client.ListOptions{} - err = controllerClient.List(context.TODO(), listOptions, clusterList) - if err != nil { - return nil, fmt.Errorf("error retrieving clusters: %v", err) - } - for _, c := range clusterList.Items { - if clusterName, ok := controlPlaneMachines[0].Labels[clusterv1.MachineClusterLabelName]; ok && clusterName == c.Name { - cluster = &c - } - } - if cluster == nil { - return nil, fmt.Errorf("could not find cluster") - } - - kubeConfig, err := deployer.New().GetKubeConfig(cluster, controlPlaneMachines[0]) +func getKubeClient(openStackCluster *infrav1.OpenStackCluster) (client.Client, error) { + kubeConfig, err := GetKubeConfig(openStackCluster) if err != nil { return nil, err } @@ -312,38 +288,33 @@ func getWorkloadClusterKubeClient(controllerClient client.Client) (client.Client return nil, err } - mgr, err := manager.New(cfg, manager.Options{}) + cl, err := client.New(cfg, client.Options{}) if err != nil { - return nil, fmt.Errorf("unable to create manager for restConfig: %v", err) + return nil, fmt.Errorf("unable to create client for restConfig: %v", err) } - return mgr.GetClient(), nil + return cl, nil } -func startupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, machineProviderSpec *providerv1.OpenstackProviderSpec, userdata, bootstrapToken string) (string, error) { - clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) - if err != nil { - return "", err - } - - if err := validateCertificates(clusterProviderSpec); err != nil { +func startupScript(machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, userdata, bootstrapToken string) (string, error) { + if err := validateCertificates(openStackCluster); err != nil { return "", err } - kubeadmConfig, err := generateKubeadmConfig(util.IsControlPlaneMachine(machine), bootstrapToken, cluster, machine, machineProviderSpec, clusterProviderSpec) + kubeadmConfig, err := generateKubeadmConfig(util.IsControlPlaneMachine(machine), bootstrapToken, machine, openStackMachine, cluster, openStackCluster) if err != nil { return "", err } params := setupParams{ - CACert: string(clusterProviderSpec.CAKeyPair.Cert), - CAKey: string(clusterProviderSpec.CAKeyPair.Key), - EtcdCACert: string(clusterProviderSpec.EtcdCAKeyPair.Cert), - EtcdCAKey: string(clusterProviderSpec.EtcdCAKeyPair.Key), - FrontProxyCACert: string(clusterProviderSpec.FrontProxyCAKeyPair.Cert), - FrontProxyCAKey: 
string(clusterProviderSpec.FrontProxyCAKeyPair.Key), - SaCert: string(clusterProviderSpec.SAKeyPair.Cert), - SaKey: string(clusterProviderSpec.SAKeyPair.Key), + CACert: string(openStackCluster.Spec.CAKeyPair.Cert), + CAKey: string(openStackCluster.Spec.CAKeyPair.Key), + EtcdCACert: string(openStackCluster.Spec.EtcdCAKeyPair.Cert), + EtcdCAKey: string(openStackCluster.Spec.EtcdCAKeyPair.Key), + FrontProxyCACert: string(openStackCluster.Spec.FrontProxyCAKeyPair.Cert), + FrontProxyCAKey: string(openStackCluster.Spec.FrontProxyCAKeyPair.Key), + SaCert: string(openStackCluster.Spec.SAKeyPair.Cert), + SaKey: string(openStackCluster.Spec.SAKeyPair.Key), Machine: machine, KubeadmConfig: kubeadmConfig, } @@ -361,21 +332,21 @@ func startupScript(cluster *clusterv1.Cluster, machine *clusterv1.Machine, machi return buf.String(), nil } -func validateCertificates(clusterProviderSpec *v1alpha1.OpenstackClusterProviderSpec) error { - if !isKeyPairValid(clusterProviderSpec.CAKeyPair.Cert, clusterProviderSpec.CAKeyPair.Key) { - return errors.New("CA cert material in the ClusterProviderSpec is missing cert/key") +func validateCertificates(openStackCluster *infrav1.OpenStackCluster) error { + if !isKeyPairValid(openStackCluster.Spec.CAKeyPair.Cert, openStackCluster.Spec.CAKeyPair.Key) { + return errors.New("CA cert material in the openStackCluster.Spec is missing cert/key") } - if !isKeyPairValid(clusterProviderSpec.EtcdCAKeyPair.Cert, clusterProviderSpec.EtcdCAKeyPair.Key) { - return errors.New("ETCD CA cert material in the ClusterProviderSpec is missing cert/key") + if !isKeyPairValid(openStackCluster.Spec.EtcdCAKeyPair.Cert, openStackCluster.Spec.EtcdCAKeyPair.Key) { + return errors.New("ETCD CA cert material in the openStackCluster.Spec is missing cert/key") } - if !isKeyPairValid(clusterProviderSpec.FrontProxyCAKeyPair.Cert, clusterProviderSpec.FrontProxyCAKeyPair.Key) { - return errors.New("FrontProxy CA cert material in ClusterProviderSpec is missing cert/key") + if !isKeyPairValid(openStackCluster.Spec.FrontProxyCAKeyPair.Cert, openStackCluster.Spec.FrontProxyCAKeyPair.Key) { + return errors.New("FrontProxy CA cert material in openStackCluster.Spec is missing cert/key") } - if !isKeyPairValid(clusterProviderSpec.SAKeyPair.Cert, clusterProviderSpec.SAKeyPair.Key) { - return errors.New("ServiceAccount cert material in ClusterProviderSpec is missing cert/key") + if !isKeyPairValid(openStackCluster.Spec.SAKeyPair.Cert, openStackCluster.Spec.SAKeyPair.Key) { + return errors.New("ServiceAccount cert material in openStackCluster.Spec is missing cert/key") } return nil } @@ -393,3 +364,33 @@ func templateYAMLIndent(i int, input string) string { ident := "\n" + strings.Repeat(" ", i) return strings.Repeat(" ", i) + strings.Join(split, ident) } + +// GetKubeConfig returns the kubeConfig after the bootstrap process is complete. 
+func GetKubeConfig(openStackCluster *infrav1.OpenStackCluster) (string, error) { + + cert, err := certificates.DecodeCertPEM(openStackCluster.Spec.CAKeyPair.Cert) + if err != nil { + return "", werrors.Wrap(err, "failed to decode CA Cert") + } else if cert == nil { + return "", errors.New("certificate not found in clusterProviderSpec") + } + + key, err := certificates.DecodePrivateKeyPEM(openStackCluster.Spec.CAKeyPair.Key) + if err != nil { + return "", werrors.Wrap(err, "failed to decode private key") + } else if key == nil { + return "", errors.New("key not found in clusterProviderSpec") + } + + cfg, err := certificates.NewKubeconfig(openStackCluster.Name, fmt.Sprintf("https://%s", openStackCluster.Spec.ClusterConfiguration.ControlPlaneEndpoint), cert, key) + if err != nil { + return "", werrors.Wrap(err, "failed to generate a kubeconfig") + } + + yaml, err := clientcmd.Write(*cfg) + if err != nil { + return "", werrors.Wrap(err, "failed to serialize config to yaml") + } + + return string(yaml), nil +} diff --git a/pkg/deployer/deployer.go b/pkg/deployer/deployer.go deleted file mode 100644 index efa3614100..0000000000 --- a/pkg/deployer/deployer.go +++ /dev/null @@ -1,70 +0,0 @@ -package deployer - -import ( - "fmt" - "github.com/pkg/errors" - "k8s.io/client-go/tools/clientcmd" - providerv1 "sigs.k8s.io/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" - "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/openstack/services/certificates" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// Deployer satisfies the ProviderDeployer(https://github.com/kubernetes-sigs/cluster-api/blob/master/cmd/clusterctl/clusterdeployer/clusterdeployer.go) interface. -type Deployer struct { -} - -// New returns a new Deployer. -func New() *Deployer { - return &Deployer{} -} - -// GetIP returns the controlPlaneEndpoint of the Kubernetes cluster, which makes sense in the current -// contexts where this method is called. Seems to be going away anyway with v1alpha2. -func (d *Deployer) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) { - clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("could not get IP: %v", err) - } - return clusterProviderSpec.ClusterConfiguration.ControlPlaneEndpoint, nil -} - -// GetKubeConfig returns the kubeConfig after the bootstrap process is complete. -func (d *Deployer) GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) (string, error) { - - // Load provider config. 
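As context for the change above (not part of the patch): the deleted Deployer is replaced by the GetKubeConfig/getKubeClient pair, which serialize a kubeconfig from the cluster CA key pair and then build a plain controller-runtime client instead of a manager. The following is a minimal sketch of that flow, assuming client-go's clientcmd.RESTConfigFromKubeConfig and controller-runtime v0.2-style List options; the helper name newWorkloadClusterClient and the placeholder kubeconfig value are illustrative only.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newWorkloadClusterClient is a hypothetical helper mirroring getKubeClient:
// it turns the kubeconfig YAML produced by GetKubeConfig into a
// controller-runtime client pointed at the workload cluster.
func newWorkloadClusterClient(kubeConfig string) (client.Client, error) {
	// Parse the serialized kubeconfig into a *rest.Config.
	cfg, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeConfig))
	if err != nil {
		return nil, fmt.Errorf("unable to build rest config from kubeconfig: %v", err)
	}
	// A bare client is enough for one-off calls such as creating the
	// bootstrap token secret; no manager or cache is required.
	return client.New(cfg, client.Options{})
}

func main() {
	// In the controller, kubeConfig comes from GetKubeConfig(openStackCluster);
	// here it is a placeholder supplied by the caller.
	kubeConfig := "..." // placeholder kubeconfig YAML

	cl, err := newWorkloadClusterClient(kubeConfig)
	if err != nil {
		panic(err)
	}

	// Example call against the workload cluster: list secrets in kube-system,
	// the namespace where bootstrap token secrets live.
	secrets := &corev1.SecretList{}
	if err := cl.List(context.TODO(), secrets, client.InNamespace("kube-system")); err != nil {
		panic(err)
	}
	fmt.Printf("found %d secrets\n", len(secrets.Items))
}
```

The sketch uses the same List call style as the updated getControlPlaneMachines (object first, options variadic), which matches the controller-runtime version this patch targets.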
- clusterProviderSpec, err := providerv1.ClusterSpecFromProviderSpec(cluster.Spec.ProviderSpec) - if err != nil { - return "", errors.Errorf("failed to load cluster provider status: %v", err) - } - - cert, err := certificates.DecodeCertPEM(clusterProviderSpec.CAKeyPair.Cert) - if err != nil { - return "", errors.Wrap(err, "failed to decode CA Cert") - } else if cert == nil { - return "", errors.New("certificate not found in clusterProviderSpec") - } - - key, err := certificates.DecodePrivateKeyPEM(clusterProviderSpec.CAKeyPair.Key) - if err != nil { - return "", errors.Wrap(err, "failed to decode private key") - } else if key == nil { - return "", errors.New("key not found in clusterProviderSpec") - } - - apiServerEndpoint, err := d.GetIP(cluster, master) - if err != nil { - return "", err - } - - cfg, err := certificates.NewKubeconfig(cluster.Name, fmt.Sprintf("https://%s", apiServerEndpoint), cert, key) - if err != nil { - return "", errors.Wrap(err, "failed to generate a kubeconfig") - } - - yaml, err := clientcmd.Write(*cfg) - if err != nil { - return "", errors.Wrap(err, "failed to serialize config to yaml") - } - - return string(yaml), nil -} diff --git a/cmd/clusterctl/examples/openstack/.gitignore b/samples/.gitignore similarity index 100% rename from cmd/clusterctl/examples/openstack/.gitignore rename to samples/.gitignore diff --git a/cmd/clusterctl/examples/openstack/README.md b/samples/README.md similarity index 94% rename from cmd/clusterctl/examples/openstack/README.md rename to samples/README.md index c4c41aa04a..02e66b7089 100644 --- a/cmd/clusterctl/examples/openstack/README.md +++ b/samples/README.md @@ -10,10 +10,7 @@ -# Openstack Example Files -## Contents -- `*.yaml` - concrete example files that can be used as is. -- `*.yaml.template` - template example files that need values filled in before use. 
+# Openstack Samples ## Prerequisites diff --git a/cmd/clusterctl/examples/openstack/generate-yaml.sh b/samples/generate-yaml.sh similarity index 85% rename from cmd/clusterctl/examples/openstack/generate-yaml.sh rename to samples/generate-yaml.sh index 273f857dfa..136e4cc281 100755 --- a/cmd/clusterctl/examples/openstack/generate-yaml.sh +++ b/samples/generate-yaml.sh @@ -4,7 +4,7 @@ set -e # Function that prints out the help message, describing the script print_help() { - echo "$SCRIPT - generates a provider-configs.yaml file" + echo "$SCRIPT - generates a provider-specific YAML files" echo "" echo "Usage:" echo "$SCRIPT [options] [output folder]" @@ -115,9 +115,9 @@ fi # Define global variables PWD=$(cd `dirname $0`; pwd) TEMPLATES_PATH=${TEMPLATES_PATH:-$PWD/$SUPPORTED_PROVIDER_OS} -HOME_DIR=${PWD%%/cmd/clusterctl/examples/*} -CONFIG_DIR=$PWD/provider-component/clouds-secrets/configs -USERDATA=$PWD/provider-component/user-data +HOME_DIR=${PWD%%/samples/*} +CONFIG_DIR=$PWD/templates/clouds-secrets/configs +USERDATA=$PWD/templates/user-data MASTER_USER_DATA=$USERDATA/$PROVIDER_OS/templates/master-user-data.sh WORKER_USER_DATA=$USERDATA/$PROVIDER_OS/templates/worker-user-data.sh @@ -140,9 +140,8 @@ CACERT="/etc/certs/cacert" # Set up the output dir if it does not yet exist mkdir -p $PWD/$OUTPUT -cp -n $PWD/cluster.yaml $PWD/$OUTPUT/cluster.yaml || true -cp -n $PWD/machines.yaml.template $PWD/$OUTPUT/machines.yaml || true -cp -n $PWD/machine-deployment.yaml.template $PWD/$OUTPUT/machine-deployment.yaml || true +cp -n $PWD/templates/cluster.yaml $PWD/$OUTPUT/cluster.yaml || true +cp -n $PWD/templates/machines.yaml $PWD/$OUTPUT/machines.yaml || true # Make the config directory mkdir -p $CONFIG_DIR @@ -225,22 +224,13 @@ else echo "dummy" > $CONFIG_DIR/cacert fi -# Build provider-components.yaml with kustomize -# Coreos has a different kubeadm path (/usr is read-only) so gets a different kustomization. 
-if [[ "$PROVIDER_OS" == "coreos" ]]; then - kubectl kustomize $PWD/../../../../overlays-config/coreos > $PWD/$OUTPUT/provider-components.yaml -else - kubectl kustomize $PWD/../../../../overlays-config/generic > $PWD/$OUTPUT/provider-components.yaml -fi echo "---" >> $PWD/$OUTPUT/provider-components.yaml -kubectl kustomize $PWD/provider-component/clouds-secrets >> $PWD/$OUTPUT/provider-components.yaml +kubectl kustomize $PWD/templates/namespace > $PWD/$OUTPUT/provider-components.yaml echo "---" >> $PWD/$OUTPUT/provider-components.yaml -# latest kustomize don't allow include files out of build folder -cp -r $CONFIG_DIR/../../../../../../../vendor/sigs.k8s.io/cluster-api/config $PWD/provider-component/cluster-api echo "---" >> $PWD/$OUTPUT/provider-components.yaml -kubectl kustomize $PWD/provider-component/cluster-api >> $PWD/$OUTPUT/provider-components.yaml -rm -fr $PWD/provider-component/cluster-api/config +kubectl kustomize $PWD/templates/clouds-secrets >> $PWD/$OUTPUT/provider-components.yaml +echo "---" >> $PWD/$OUTPUT/provider-components.yaml echo "---" >> $PWD/$OUTPUT/provider-components.yaml kubectl kustomize $USERDATA/$PROVIDER_OS >> $PWD/$OUTPUT/provider-components.yaml diff --git a/samples/templates/clouds-secrets/.gitignore b/samples/templates/clouds-secrets/.gitignore new file mode 100644 index 0000000000..52e5937d23 --- /dev/null +++ b/samples/templates/clouds-secrets/.gitignore @@ -0,0 +1 @@ +configs \ No newline at end of file diff --git a/cmd/clusterctl/examples/openstack/provider-component/clouds-secrets/kustomization.yaml b/samples/templates/clouds-secrets/kustomization.yaml similarity index 89% rename from cmd/clusterctl/examples/openstack/provider-component/clouds-secrets/kustomization.yaml rename to samples/templates/clouds-secrets/kustomization.yaml index 25dd9b6edb..a971ef55b2 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/clouds-secrets/kustomization.yaml +++ b/samples/templates/clouds-secrets/kustomization.yaml @@ -14,4 +14,4 @@ secretGenerator: - OS_CLOUD=configs/os_cloud.txt type: Opaque -namespace: openstack-provider-system +namespace: capo-system diff --git a/samples/templates/cluster.yaml b/samples/templates/cluster.yaml new file mode 100644 index 0000000000..6c06fa0fe1 --- /dev/null +++ b/samples/templates/cluster.yaml @@ -0,0 +1,40 @@ +apiVersion: cluster.x-k8s.io/v1alpha2 +kind: Cluster +metadata: + name: cluster001 + namespace: default +spec: + clusterNetwork: + services: + cidrBlocks: ["10.96.0.0/12"] + pods: + cidrBlocks: ["192.168.0.0/16"] + serviceDomain: "cluster.local" + infrastructureRef: + kind: OpenStackCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 + name: cluster001 + namespace: default +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: OpenStackCluster +metadata: + name: cluster001 + namespace: default +spec: + tags: + - a_cluster_wide_tag + clusterConfiguration: + controlPlaneEndpoint: :6443 + kubernetesVersion: 1.15.0 + imageRepository: k8s.gcr.io + certificatesDir: /etc/kubernetes/pki + dns: + type: "CoreDNS" + networking: + dnsDomain: "cluster.local" + podSubnet: "192.168.0.0/16" + serviceSubnet: "10.96.0.0/12" + etcd: + local: + dataDir: "/var/lib/etcd" diff --git a/samples/templates/machines.yaml b/samples/templates/machines.yaml new file mode 100644 index 0000000000..d6ec1761e4 --- /dev/null +++ b/samples/templates/machines.yaml @@ -0,0 +1,104 @@ +apiVersion: cluster.x-k8s.io/v1alpha2 +kind: Machine +metadata: + name: test-cluster-kube-master-01 + labels: + cluster.x-k8s.io/cluster-name: 
test-cluster + cluster.x-k8s.io/control-plane: "true" +spec: + bootstrap: + configRef: + kind: KubeadmConfig + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2 + namespace: default + name: test-cluster-config + infrastructureRef: + kind: OpenStackMachine + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 + namespace: default + name: test-cluster-kube-master-01 + version: "v1.15.0" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: OpenStackMachine +metadata: + name: test-cluster-kube-master-01 +spec: + flavor: m1.medium + image: + keyName: cluster-api-provider-openstack + availabilityZone: nova + networks: + - uuid: + floatingIP: + securityGroups: + - uuid: + userDataSecret: + name: master-user-data + namespace: openstack-provider-system + trunk: false + tags: + - a-machine-specific-tag + serverMetadata: + key: value +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2 +kind: KubeadmConfig +metadata: + name: test-cluster-config +spec: + clusterConfiguration: + certificatesDir: /etc/kubernetes/pki + controlPlaneEndpoint: "53.48.112.86:6443" + dns: + type: "CoreDNS" + imageRepository: "k8s.gcr.io" + kubernetesVersion: v1.14.2 + networking: + dnsDomain: "" + podSubnet: "" + serviceSubnet: "" + etcd: {} +--- +apiVersion: cluster.x-k8s.io/v1alpha2 +kind: Machine +metadata: + name: test-cluster-openstack-node-01 + labels: + cluster.x-k8s.io/cluster-name: test-cluster +spec: + bootstrap: + configRef: + kind: KubeadmConfig + apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2 + namespace: default + name: test-cluster-config + infrastructureRef: + kind: OpenStackMachine + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 + namespace: default + name: test-cluster-openstack-node-01 + version: "v1.15.0" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2 +kind: OpenStackMachine +metadata: + name: test-cluster-openstack-node-01 +spec: + flavor: m1.medium + image: + keyName: cluster-api-provider-openstack + availabilityZone: nova + networks: + - uuid: + floatingIP: + securityGroups: + - uuid: + userDataSecret: + name: worker-user-data + namespace: openstack-provider-system + trunk: false + tags: + - a-machine-specific-tag + serverMetadata: + key: value diff --git a/samples/templates/namespace/kustomization.yaml b/samples/templates/namespace/kustomization.yaml new file mode 100644 index 0000000000..b0b5e49473 --- /dev/null +++ b/samples/templates/namespace/kustomization.yaml @@ -0,0 +1,2 @@ +resources: + - ./namespace.yaml \ No newline at end of file diff --git a/config/manager/namespace.yaml b/samples/templates/namespace/namespace.yaml similarity index 71% rename from config/manager/namespace.yaml rename to samples/templates/namespace/namespace.yaml index 807e3adb34..37b573104f 100644 --- a/config/manager/namespace.yaml +++ b/samples/templates/namespace/namespace.yaml @@ -3,4 +3,4 @@ kind: Namespace metadata: labels: controller-tools.k8s.io: "1.0" - name: openstack-provider-system + name: openstack-provider-system \ No newline at end of file diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/kustomization.yaml b/samples/templates/user-data/centos/kustomization.yaml similarity index 100% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/centos/kustomization.yaml rename to samples/templates/user-data/centos/kustomization.yaml diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh b/samples/templates/user-data/centos/templates/master-user-data.sh similarity index 98% 
rename from cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh rename to samples/templates/user-data/centos/templates/master-user-data.sh index 9ca8e1da5c..559cc4e7d1 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/master-user-data.sh +++ b/samples/templates/user-data/centos/templates/master-user-data.sh @@ -2,7 +2,7 @@ set -e set -x ( -KUBELET_VERSION={{ .Machine.Spec.Versions.Kubelet }} +KUBELET_VERSION={{ .Machine.Spec.Version }} NAMESPACE={{ .Machine.ObjectMeta.Namespace }} MACHINE=$NAMESPACE MACHINE+="/" diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/worker-user-data.sh b/samples/templates/user-data/centos/templates/worker-user-data.sh similarity index 97% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/worker-user-data.sh rename to samples/templates/user-data/centos/templates/worker-user-data.sh index eed672f19c..be8ddb7062 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/centos/templates/worker-user-data.sh +++ b/samples/templates/user-data/centos/templates/worker-user-data.sh @@ -2,7 +2,7 @@ set -e set -x ( -KUBELET_VERSION={{ .Machine.Spec.Versions.Kubelet }} +KUBELET_VERSION={{ .Machine.Spec.Version }} NAMESPACE={{ .Machine.ObjectMeta.Namespace }} MACHINE=$NAMESPACE MACHINE+="/" diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/kustomization.yaml b/samples/templates/user-data/coreos/kustomization.yaml similarity index 100% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/kustomization.yaml rename to samples/templates/user-data/coreos/kustomization.yaml diff --git a/samples/templates/user-data/coreos/master-user-data.yaml b/samples/templates/user-data/coreos/master-user-data.yaml new file mode 100644 index 0000000000..c27aa3b83a --- /dev/null +++ b/samples/templates/user-data/coreos/master-user-data.yaml @@ -0,0 +1,353 @@ +locksmith: + reboot_strategy: "off" +storage: + directories: + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/kubetmp + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/cni + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/cni/bin + user: + id: 0 + files: + - contents: + inline: | + {{ .Machine.ObjectMeta.Name }} + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/hostname + user: + id: 0 + - contents: + remote: + url: https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz + filesystem: root + group: + id: 0 + mode: 420 + path: /opt/kubetmp/cni-plugins.tar.gz + user: + id: 0 + - contents: + remote: + url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.12.0/crictl-v1.12.0-linux-amd64.tar.gz + filesystem: root + group: + id: 0 + mode: 420 + path: /opt/kubetmp/crictl.tar.gz + user: + id: 0 + - contents: + remote: + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubeadm + filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin/kubeadm + user: + id: 0 + - contents: + remote: + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubectl + filesystem: root + group: + id: 0 + 
mode: 493 + path: /opt/bin/kubectl + user: + id: 0 + - contents: + remote: + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubelet + filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin/kubelet + user: + id: 0 + - contents: + inline: | + { + "exec-opts": ["native.cgroupdriver=systemd"], + "iptables": false, + "ip-masq": false + } + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/docker/daemon.json + user: + id: 0 + - contents: + inline: |+ + [Global] + auth-url=https://os.detss.corpintra.net:5000/v3/ + username="E415_s_os1pi020" + password="KJFs!3CuCu%kjYuyfd87" + region="null" + tenant-id="b9abf84da3944b85a61cab7aecf97d77" + domain-name="null" + + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/kubernetes/cloud.conf + user: + id: 0 + - contents: + inline: "" + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/certs/cacert + user: + id: 0 + - contents: + inline: | + ip_vs + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvs.conf + user: + id: 0 + - contents: + inline: | + ip_vs_rr + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvsrr.conf + user: + id: 0 + - contents: + inline: | + ip_vs_sh + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvssh.conf + user: + id: 0 + - contents: + inline: | + ip_vs_wrr + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvswrr.conf + user: + id: 0 + - contents: + inline: | + br_netfilter + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/br_netfilter.conf + user: + id: 0 + - contents: + inline: '{{ .CACert | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 416 + path: /etc/kubernetes/pki/ca.crt + user: + id: 0 + - contents: + inline: '{{ .CAKey | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/kubernetes/pki/ca.key + user: + id: 0 + - contents: + inline: '{{ .EtcdCACert | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 416 + path: /etc/kubernetes/pki/etcd/ca.crt + user: + id: 0 + - contents: + inline: '{{ .EtcdCAKey | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/kubernetes/pki/etcd/ca.key + user: + id: 0 + - contents: + inline: '{{ .FrontProxyCACert | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 416 + path: /etc/kubernetes/pki/front-proxy-ca.crt + user: + id: 0 + - contents: + inline: '{{ .FrontProxyCAKey | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/kubernetes/pki/front-proxy-ca.key + user: + id: 0 + - contents: + inline: '{{ .SaCert | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 416 + path: /etc/kubernetes/pki/sa.pub + user: + id: 0 + - contents: + inline: '{{ .SaKey | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/kubernetes/pki/sa.key + user: + id: 0 + - contents: + inline: '{{ .KubeadmConfig | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/kubernetes/kubeadm_config.yaml + user: + id: 0 + - contents: + inline: | + #!/usr/bin/env bash + + . 
/run/metadata/coreos + + echo "Replacing OPENSTACK_IPV4_LOCAL in kubeadm_config through ${COREOS_OPENSTACK_IPV4_LOCAL}" + /usr/bin/sed -i "s#\${OPENSTACK_IPV4_LOCAL}#${COREOS_OPENSTACK_IPV4_LOCAL}#" /etc/kubernetes/kubeadm_config.yaml + filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin/prepare.sh + user: + id: 0 +systemd: + units: + - enabled: true + name: docker.service + - contents: | + [Unit] + Description=Unpack CNI and CRICTL into the right places. + Before=kubelet.service + + [Service] + Type=oneshot + ExecStartPre=/usr/bin/tar -C /opt/bin -xzf /opt/kubetmp/crictl.tar.gz + ExecStart=/usr/bin/tar -C /opt/cni/bin -xzf /opt/kubetmp/cni-plugins.tar.gz + ExecStartPost=/usr/bin/systemctl disable unpack.service + + [Install] + WantedBy=multi-user.target + enabled: true + name: unpack.service + - contents: | + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + + [Service] + ExecStart=/opt/bin/kubelet + Restart=always + StartLimitInterval=0 + RestartSec=10 + + [Install] + WantedBy=multi-user.target + dropins: + - contents: | + # Note: This dropin only works with kubeadm and kubelet v1.11+ + [Service] + Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" + Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" + # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically + EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env + # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use + # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. + EnvironmentFile=-/etc/default/kubelet + ExecStart= + ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS + name: 10-kubeadm.conf + enabled: true + name: kubelet.service + - contents: |- + [Unit] + Description=Initialise Kubernetes control plane node. 
+ After=kubelet.service + Requires=coreos-metadata.service + + [Service] + Type=oneshot + Environment="PATH=/usr/bin:/usr/sbin:/opt/bin:/opt/cni/bin:/bin/sbin" + ExecStartPre=/opt/bin/prepare.sh + ExecStart=/opt/bin/kubeadm init --config /etc/kubernetes/kubeadm_config.yaml + ExecStartPost=/opt/bin/kubectl --kubeconfig /etc/kubernetes/kubelet.conf annotate --overwrite node %H machine={{ .Machine.ObjectMeta.Namespace }}/{{ .Machine.ObjectMeta.Name }} + ExecStartPost=/opt/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml + ExecStartPost=/usr/bin/systemctl disable kubeadm.service + + [Install] + WantedBy=multi-user.target + enabled: true + name: kubeadm.service diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/common.yaml b/samples/templates/user-data/coreos/templates/common.yaml similarity index 96% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/common.yaml rename to samples/templates/user-data/coreos/templates/common.yaml index 82329c1bb6..dd73e3c928 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/common.yaml +++ b/samples/templates/user-data/coreos/templates/common.yaml @@ -70,7 +70,7 @@ storage: filesystem: root contents: remote: - url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Versions.Kubelet}}/bin/linux/amd64/kubeadm + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubeadm mode: 0755 user: id: 0 @@ -80,7 +80,7 @@ storage: filesystem: root contents: remote: - url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Versions.Kubelet}}/bin/linux/amd64/kubectl + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubectl mode: 0755 user: id: 0 @@ -90,7 +90,7 @@ storage: filesystem: root contents: remote: - url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Versions.Kubelet}}/bin/linux/amd64/kubelet + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubelet mode: 0755 user: id: 0 diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/master.yaml b/samples/templates/user-data/coreos/templates/master.yaml similarity index 100% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/master.yaml rename to samples/templates/user-data/coreos/templates/master.yaml diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/worker.yaml b/samples/templates/user-data/coreos/templates/worker.yaml similarity index 100% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/coreos/templates/worker.yaml rename to samples/templates/user-data/coreos/templates/worker.yaml diff --git a/samples/templates/user-data/coreos/worker-user-data.yaml b/samples/templates/user-data/coreos/worker-user-data.yaml new file mode 100644 index 0000000000..3a35111d3e --- /dev/null +++ b/samples/templates/user-data/coreos/worker-user-data.yaml @@ -0,0 +1,264 @@ +locksmith: + reboot_strategy: "off" +storage: + directories: + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/kubetmp + user: 
+ id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/cni + user: + id: 0 + - filesystem: root + group: + id: 0 + mode: 493 + path: /opt/cni/bin + user: + id: 0 + files: + - contents: + inline: | + {{ .Machine.ObjectMeta.Name }} + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/hostname + user: + id: 0 + - contents: + remote: + url: https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz + filesystem: root + group: + id: 0 + mode: 420 + path: /opt/kubetmp/cni-plugins.tar.gz + user: + id: 0 + - contents: + remote: + url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.12.0/crictl-v1.12.0-linux-amd64.tar.gz + filesystem: root + group: + id: 0 + mode: 420 + path: /opt/kubetmp/crictl.tar.gz + user: + id: 0 + - contents: + remote: + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubeadm + filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin/kubeadm + user: + id: 0 + - contents: + remote: + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubectl + filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin/kubectl + user: + id: 0 + - contents: + remote: + url: https://storage.googleapis.com/kubernetes-release/release/v{{.Machine.Spec.Version}}/bin/linux/amd64/kubelet + filesystem: root + group: + id: 0 + mode: 493 + path: /opt/bin/kubelet + user: + id: 0 + - contents: + inline: | + { + "exec-opts": ["native.cgroupdriver=systemd"], + "iptables": false, + "ip-masq": false + } + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/docker/daemon.json + user: + id: 0 + - contents: + inline: |+ + [Global] + auth-url=https://os.detss.corpintra.net:5000/v3/ + username="E415_s_os1pi020" + password="KJFs!3CuCu%kjYuyfd87" + region="null" + tenant-id="b9abf84da3944b85a61cab7aecf97d77" + domain-name="null" + + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/kubernetes/cloud.conf + user: + id: 0 + - contents: + inline: "" + filesystem: root + group: + id: 0 + mode: 384 + path: /etc/certs/cacert + user: + id: 0 + - contents: + inline: | + ip_vs + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvs.conf + user: + id: 0 + - contents: + inline: | + ip_vs_rr + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvsrr.conf + user: + id: 0 + - contents: + inline: | + ip_vs_sh + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvssh.conf + user: + id: 0 + - contents: + inline: | + ip_vs_wrr + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/ipvswrr.conf + user: + id: 0 + - contents: + inline: | + br_netfilter + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/modules-load.d/br_netfilter.conf + user: + id: 0 + - contents: + inline: '{{ .KubeadmConfig | EscapeNewLines }}' + filesystem: root + group: + id: 0 + mode: 420 + path: /etc/kubernetes/kubeadm_config.yaml + user: + id: 0 +systemd: + units: + - enabled: true + name: docker.service + - contents: | + [Unit] + Description=Unpack CNI and CRICTL into the right places. 
+ Before=kubelet.service + + [Service] + Type=oneshot + ExecStartPre=/usr/bin/tar -C /opt/bin -xzf /opt/kubetmp/crictl.tar.gz + ExecStart=/usr/bin/tar -C /opt/cni/bin -xzf /opt/kubetmp/cni-plugins.tar.gz + ExecStartPost=/usr/bin/systemctl disable unpack.service + + [Install] + WantedBy=multi-user.target + enabled: true + name: unpack.service + - contents: | + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + + [Service] + ExecStart=/opt/bin/kubelet + Restart=always + StartLimitInterval=0 + RestartSec=10 + + [Install] + WantedBy=multi-user.target + dropins: + - contents: | + # Note: This dropin only works with kubeadm and kubelet v1.11+ + [Service] + Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" + Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" + # This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically + EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env + # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use + # the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file. + EnvironmentFile=-/etc/default/kubelet + ExecStart= + ExecStart=/opt/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS + name: 10-kubeadm.conf + enabled: true + name: kubelet.service + - contents: |- + [Unit] + Description=Initialise Kubernetes worker node. + After=kubelet.service + Requires=coreos-metadata.service + + [Service] + Type=oneshot + Environment="PATH=/usr/bin:/usr/sbin:/opt/bin:/opt/cni/bin:/bin/sbin" + ExecStart=/opt/bin/kubeadm join --ignore-preflight-errors=all --config /etc/kubernetes/kubeadm_config.yaml + ExecStartPost=/opt/bin/kubectl --kubeconfig /etc/kubernetes/kubelet.conf annotate --overwrite node %H machine={{ .Machine.ObjectMeta.Namespace }}/{{ .Machine.ObjectMeta.Name }} + ExecStartPost=/usr/bin/systemctl disable kubeadm.service + + [Install] + WantedBy=multi-user.target + enabled: true + name: kubeadm.service diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/kustomization.yaml b/samples/templates/user-data/ubuntu/kustomization.yaml similarity index 100% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/kustomization.yaml rename to samples/templates/user-data/ubuntu/kustomization.yaml diff --git a/samples/templates/user-data/ubuntu/master-user-data.sh b/samples/templates/user-data/ubuntu/master-user-data.sh new file mode 100644 index 0000000000..afa2972ca5 --- /dev/null +++ b/samples/templates/user-data/ubuntu/master-user-data.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +set -e +set -x +( +KUBELET_VERSION={{ .Machine.Spec.Version }} +VERSION=v${KUBELET_VERSION} +NAMESPACE={{ .Machine.ObjectMeta.Namespace }} +MACHINE=$NAMESPACE +MACHINE+="/" +MACHINE+={{ .Machine.ObjectMeta.Name }} +ARCH=amd64 +swapoff -a +# disable swap in fstab +sed -i.bak -r 's/(.+ swap .+)/#\1/' /etc/fstab +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - +touch /etc/apt/sources.list.d/kubernetes.list +sh -c 'echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list' +apt-get update -y +apt-get install -y \ + prips + +# Getting local ip from the 
metadata of the node. +echo "Getting local ip from metadata" +for i in $(seq 60); do + echo "trying to get local-ipv4 $i / 60" + OPENSTACK_IPV4_LOCAL=$(curl --fail -s http://169.254.169.254/latest/meta-data/local-ipv4) + if [[ $? == 0 ]] && [[ -n "$OPENSTACK_IPV4_LOCAL" ]]; then + break + fi + sleep 1 +done + +function install_configure_docker () { + # prevent docker from auto-starting + echo "exit 101" > /usr/sbin/policy-rc.d + chmod +x /usr/sbin/policy-rc.d + trap "rm /usr/sbin/policy-rc.d" RETURN + apt-get install -y docker.io + echo 'DOCKER_OPTS="--iptables=false --ip-masq=false"' > /etc/default/docker + + # Reset iptables config + mkdir -p /etc/systemd/system/docker.service.d + cat > /etc/systemd/system/docker.service.d/10-iptables.conf < /usr/bin/kubeadm.dl +chmod a+rx /usr/bin/kubeadm.dl + +# Our Debian packages have versions like "1.8.0-00" or "1.8.0-01". Do a prefix +# search based on our SemVer to find the right (newest) package version. +function getversion() { + name=$1 + prefix=$2 + version=$(apt-cache madison $name | awk '{ print $3 }' | grep ^$prefix | head -n1) + if [[ -z "$version" ]]; then + echo Can\'t find package $name with prefix $prefix + exit 1 + fi + echo $version +} +KUBELET=$(getversion kubelet ${KUBELET_VERSION}-) +KUBEADM=$(getversion kubeadm ${KUBELET_VERSION}-) +KUBECTL=$(getversion kubectl ${KUBELET_VERSION}-) +apt-get install -y \ + kubelet=${KUBELET} \ + kubeadm=${KUBEADM} \ + kubectl=${KUBECTL} + +mv /usr/bin/kubeadm.dl /usr/bin/kubeadm +chmod a+rx /usr/bin/kubeadm + +echo W0dsb2JhbF0KYXV0aC11cmw9bnVsbAp1c2VybmFtZT0ibnVsbCIKcGFzc3dvcmQ9Im51bGwiCnJlZ2lvbj0ibnVsbCIKdGVuYW50LWlkPSJudWxsIgpkb21haW4tbmFtZT0ibnVsbCIKCg== | base64 -d > /etc/kubernetes/cloud.conf +chmod 600 /etc/kubernetes/cloud.conf +mkdir /etc/certs +echo | base64 -d > /etc/certs/cacert + +systemctl daemon-reload +systemctl restart kubelet.service +systemctl disable ufw +systemctl mask ufw + +# Setup certificates +mkdir - /etc/kubernetes/pki /etc/kubernetes/pki/etcd +cat > /etc/kubernetes/pki/ca.crt < /etc/kubernetes/pki/ca.key < /etc/kubernetes/pki/etcd/ca.crt < /etc/kubernetes/pki/etcd/ca.key < /etc/kubernetes/pki/front-proxy-ca.crt < /etc/kubernetes/pki/front-proxy-ca.key < /etc/kubernetes/pki/sa.pub < /etc/kubernetes/pki/sa.key < /etc/kubernetes/kubeadm_config.yaml <&1 | tee /var/log/startup.log diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh b/samples/templates/user-data/ubuntu/templates/master-user-data.sh similarity index 98% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh rename to samples/templates/user-data/ubuntu/templates/master-user-data.sh index c6a9493b81..e81d7ff012 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/master-user-data.sh +++ b/samples/templates/user-data/ubuntu/templates/master-user-data.sh @@ -2,7 +2,7 @@ set -e set -x ( -KUBELET_VERSION={{ .Machine.Spec.Versions.Kubelet }} +KUBELET_VERSION={{ .Machine.Spec.Version }} VERSION=v${KUBELET_VERSION} NAMESPACE={{ .Machine.ObjectMeta.Namespace }} MACHINE=$NAMESPACE diff --git a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/worker-user-data.sh b/samples/templates/user-data/ubuntu/templates/worker-user-data.sh similarity index 98% rename from cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/worker-user-data.sh rename to samples/templates/user-data/ubuntu/templates/worker-user-data.sh index 
1dbb57f9be..8a6d8bc36a 100644 --- a/cmd/clusterctl/examples/openstack/provider-component/user-data/ubuntu/templates/worker-user-data.sh +++ b/samples/templates/user-data/ubuntu/templates/worker-user-data.sh @@ -2,7 +2,7 @@ set -e set -x ( -KUBELET_VERSION={{ .Machine.Spec.Versions.Kubelet }} +KUBELET_VERSION={{ .Machine.Spec.Version }} NAMESPACE={{ .Machine.ObjectMeta.Namespace }} MACHINE=$NAMESPACE MACHINE+="/" diff --git a/samples/templates/user-data/ubuntu/worker-user-data.sh b/samples/templates/user-data/ubuntu/worker-user-data.sh new file mode 100644 index 0000000000..8730c51a64 --- /dev/null +++ b/samples/templates/user-data/ubuntu/worker-user-data.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +set -e +set -x +( +KUBELET_VERSION={{ .Machine.Spec.Version }} +NAMESPACE={{ .Machine.ObjectMeta.Namespace }} +MACHINE=$NAMESPACE +MACHINE+="/" +MACHINE+={{ .Machine.ObjectMeta.Name }} + +swapoff -a +# disable swap in fstab +sed -i.bak -r 's/(.+ swap .+)/#\1/' /etc/fstab +apt-get update +apt-get install -y apt-transport-https prips +apt-key adv --keyserver hkp://keyserver.ubuntu.com --recv-keys F76221572C52609D +cat < /etc/apt/sources.list.d/k8s.list +deb [arch=amd64] https://apt.dockerproject.org/repo ubuntu-xenial main +EOF +apt-get update + +function install_configure_docker () { + # prevent docker from auto-starting + echo "exit 101" > /usr/sbin/policy-rc.d + chmod +x /usr/sbin/policy-rc.d + trap "rm /usr/sbin/policy-rc.d" RETURN + apt-get install -y docker.io + echo 'DOCKER_OPTS="--iptables=false --ip-masq=false"' > /etc/default/docker + + # Reset iptables config + mkdir -p /etc/systemd/system/docker.service.d + cat > /etc/systemd/system/docker.service.d/10-iptables.conf < /etc/apt/sources.list.d/kubernetes.list +deb http://apt.kubernetes.io/ kubernetes-xenial main +EOF +apt-get update + +# Needed for the node and kubeadm preflights +modprobe ip_vs_sh ip_vs ip_vs_rr ip_vs_wrr + +mkdir -p /etc/kubernetes/ +# Our Debian packages have versions like "1.8.0-00" or "1.8.0-01". Do a prefix +# search based on our SemVer to find the right (newest) package version. +function getversion() { + name=$1 + prefix=$2 + version=$(apt-cache madison $name | awk '{ print $3 }' | grep ^$prefix | head -n1) + if [[ -z "$version" ]]; then + echo Can\'t find package $name with prefix $prefix + exit 1 + fi + echo $version +} +KUBELET=$(getversion kubelet ${KUBELET_VERSION}-) +KUBEADM=$(getversion kubeadm ${KUBELET_VERSION}-) +KUBECTL=$(getversion kubectl ${KUBELET_VERSION}-) +apt-get install -y kubelet=${KUBELET} kubeadm=${KUBEADM} kubectl=${KUBECTL} +# kubeadm uses 10th IP as DNS server + +# Write the cloud.conf so that the kubelet can use it. +echo W0dsb2JhbF0KYXV0aC11cmw9bnVsbAp1c2VybmFtZT0ibnVsbCIKcGFzc3dvcmQ9Im51bGwiCnJlZ2lvbj0ibnVsbCIKdGVuYW50LWlkPSJudWxsIgpkb21haW4tbmFtZT0ibnVsbCIKCg== | base64 -d > /etc/kubernetes/cloud.conf +mkdir /etc/certs +echo | base64 -d > /etc/certs/cacert + +# Set up kubeadm config file to pass to kubeadm join. 
+cat > /etc/kubernetes/kubeadm_config.yaml <&1 | tee /var/log/startup.log diff --git a/third_party/forked/README.md b/third_party/forked/README.md new file mode 100644 index 0000000000..325c5c6339 --- /dev/null +++ b/third_party/forked/README.md @@ -0,0 +1,6 @@ +# rerun-process-wrapper + + +Repo: https://github.com/windmilleng/rerun-process-wrapper + +Commit: `fbdb45b229df173f862ee856b3070bf649d9dc0b` diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/third_party/forked/rerun-process-wrapper/LICENSE similarity index 100% rename from vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE rename to third_party/forked/rerun-process-wrapper/LICENSE diff --git a/third_party/forked/rerun-process-wrapper/README.md b/third_party/forked/rerun-process-wrapper/README.md new file mode 100644 index 0000000000..1e9f7576e5 --- /dev/null +++ b/third_party/forked/rerun-process-wrapper/README.md @@ -0,0 +1,29 @@ +# Rerunning your process via `live_update` +## i.e., simulating [`restart_container()`](https://docs.tilt.dev/live_update_reference.html#restart_container) on non-Docker clusters + +As of 6/28/19: `restart_container()`, a command that can be passed to a `live_update`, doesn't work on non-Docker clusters. However there's a workaround available to simulate `restart_container()`'s functionality. It's used in [the onewatch integration test](https://github.com/windmilleng/tilt/tree/master/integration/onewatch) so that the test passes on non-Docker clusters. Here's how to do it yourself: + +Copy `start.sh` and `restart.sh` to your container working dir. + +If your container entrypoint *was* `path-to-binary [arg1] [arg2]...`, change it to: +``` +./start.sh path-to-binary [arg1] [arg2]... +``` + +To restart the container, instead of including Live Update step `restart_container()`, use: +`run('./restart.sh')` + +So, for example: + +```python +docker_build('gcr.io/windmill-test-containers/integration/onewatch', + '.', + dockerfile='Dockerfile', + live_update=[ + sync('.', '/go/src/github.com/windmilleng/tilt/integration/onewatch'), + run('go install github.com/windmilleng/tilt/integration/onewatch'), + run('./restart.sh'), + ]) +``` + +This live update will cause the `go install` to be run in the container every time anything in the `.` path locally changes. After the `go install` is run, `./restart.sh` will be run. This will kill the original entrypoint, and restart it, effectively simulating the `container_restart()` functionality on Docker. diff --git a/third_party/forked/rerun-process-wrapper/restart.sh b/third_party/forked/rerun-process-wrapper/restart.sh new file mode 100755 index 0000000000..5e57254dbe --- /dev/null +++ b/third_party/forked/rerun-process-wrapper/restart.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# +# A helper script to implement restart_container when the docker runtime isn't available. +# +# Usage: +# Copy start.sh and restart.sh to your container working dir. +# +# Make your container entrypoint: +# ./start.sh path-to-binary [args] +# +# To restart the container: +# ./restart.sh + +set -u + +touch restart.txt +PID="$(cat process.txt)" +if [ $? -ne 0 ]; then + echo "unable to read process.txt. was your process started with start.sh?" 
+ exit 1 +fi +kill "$PID" diff --git a/third_party/forked/rerun-process-wrapper/start.sh b/third_party/forked/rerun-process-wrapper/start.sh new file mode 100755 index 0000000000..955f283e8e --- /dev/null +++ b/third_party/forked/rerun-process-wrapper/start.sh @@ -0,0 +1,41 @@ +#!/bin/sh +# +# A helper script to implement restart_container when the docker runtime isn't available. +# +# Usage: +# Copy start.sh and restart.sh to your container working dir. +# +# Make your container entrypoint: +# ./start.sh path-to-binary [args] +# +# To restart the container: +# ./restart.sh + +set -eu + +process_id="" + +trap quit TERM INT + +quit() { + if [ -n "$process_id" ]; then + kill $process_id + fi +} + +while true; do + rm -f restart.txt + + "$@" & + process_id=$! + echo "$process_id" > process.txt + set +e + wait $process_id + EXIT_CODE=$? + set -e + if [ ! -f restart.txt ]; then + echo "Exiting with code $EXIT_CODE" + exit $EXIT_CODE + fi + echo "Restarting" +done diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS deleted file mode 100644 index c364af1da0..0000000000 --- a/vendor/cloud.google.com/go/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Filippo Valsorda -Google Inc. -Ingo Oeser -Palm Stone Games, Inc. -Paweł Knap -Péter Szilágyi -Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS deleted file mode 100644 index 3b3cbed98e..0000000000 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ /dev/null @@ -1,40 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. - -Alexis Hunt -Andreas Litt -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Sansome -David Symonds -Filippo Valsorda -Glenn Lewis -Ingo Oeser -James Hall -Johan Euphrosine -Jonathan Amsterdam -Kunpei Sakai -Luna Duclos -Magnus Hiie -Mario Castro -Michael McGreevy -Omar Jarjur -Paweł Knap -Péter Szilágyi -Sarah Adams -Thanatat Tamtan -Toby Burress -Tuo Shan -Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
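
For context, a minimal sketch of how the cloud.google.com/go/compute/metadata package (whose vendored copy is dropped in the next hunk) is typically consumed. It relies only on the exported helpers visible in the removed source (OnGCE, ProjectID, InstanceAttributeValue, and the NotDefinedError type); the attribute name "my-attr" is purely illustrative.

```go
// Illustrative consumer of the vendored GCE metadata package being removed.
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server (HTTP and DNS) to decide whether
	// the process is running on Google Compute Engine.
	if !metadata.OnGCE() {
		log.Println("not running on GCE; skipping metadata lookups")
		return
	}

	// Simple lookups are plain getters backed by
	// http://169.254.169.254/computeMetadata/v1/... requests.
	project, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("project ID: %v", err)
	}
	fmt.Println("project:", project)

	// Undefined attributes are reported via the NotDefinedError type,
	// which callers can detect with a type assertion.
	// "my-attr" is a made-up attribute name used only for illustration.
	if _, err := metadata.InstanceAttributeValue("my-attr"); err != nil {
		if _, ok := err.(metadata.NotDefinedError); ok {
			fmt.Println("attribute my-attr is not defined")
		} else {
			log.Fatalf("attribute lookup: %v", err)
		}
	}
}
```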
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index 125b7033c9..0000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. - metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. 
- if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". 
-func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata. -type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. -func NewClient(c *http.Client) *Client { - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := c.hc.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. 
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance ID string. -func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. 
-// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml deleted file mode 100644 index 192981b7f6..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - 1.10.x - -go_import_path: contrib.go.opencensus.io/exporter/ocagent - -before_script: - - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any - - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any - -script: - - go build ./... # Ensure dependency updates don't break build - - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - - go vet ./... 
- - go test -v -race $PKGS # Run all the tests with the race detector enabled - - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md deleted file mode 100644 index 0786fdf434..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md +++ /dev/null @@ -1,24 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. - -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md deleted file mode 100644 index 64550ee609..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# OpenCensus Agent Go Exporter - -[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] - - -This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. -OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from -OpenCensus Library, export them to other backends and possibly push configurations back to -Library. See more details on [OC-Agent Readme][OCAgentReadme]. - -Note: This is an experimental repository and is likely to get backwards-incompatible changes. -Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. - -## Installation - -```bash -$ go get -u contrib.go.opencensus.io/exporter/ocagent/v1 -``` - -## Usage - -```go -import ( - "context" - "fmt" - "log" - "time" - - "contrib.go.opencensus.io/exporter/ocagent/v1" - "go.opencensus.io/trace" -) - -func Example() { - exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) - if err != nil { - log.Fatalf("Failed to create the agent exporter: %v", err) - } - defer exp.Stop() - - // Now register it as a trace exporter. - trace.RegisterExporter(exp) - - // Then use the OpenCensus tracing library, like we normally would. 
- ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") - defer span.End() - - for i := 0; i < 10; i++ { - _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) - <-time.After(6 * time.Millisecond) - iSpan.End() - } -} -``` - -[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto -[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go -[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg -[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent -[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master -[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent - diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go deleted file mode 100644 index 297e44b6e7..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "math/rand" - "time" -) - -var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) - -// retries function fn upto n times, if fn returns an error lest it returns nil early. -// It applies exponential backoff in units of (1< 0 { - port = ae.agentPort - } - return fmt.Sprintf("%s:%d", DefaultAgentHost, port) -} - -func (ae *Exporter) doStartLocked() error { - if ae.started { - return nil - } - - // Now start it - cc, err := ae.dialToAgent() - if err != nil { - return err - } - ae.grpcClientConn = cc - - // Initiate the trace service by sending over node identifier info. - traceSvcClient := agenttracepb.NewTraceServiceClient(cc) - traceExporter, err := traceSvcClient.Export(context.Background()) - if err != nil { - return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) - } - - firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{Node: ae.nodeInfo} - err = nTriesWithExponentialBackoff(maxInitialTracesRetries, 200*time.Microsecond, func() error { - return traceExporter.Send(firstTraceMessage) - }) - if err != nil { - return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) - } - ae.traceExporter = traceExporter - - // Initiate the config service by sending over node identifier info. 
- configStream, err := traceSvcClient.Config(context.Background()) - if err != nil { - return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) - } - firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: ae.nodeInfo} - err = nTriesWithExponentialBackoff(maxInitialConfigRetries, 200*time.Microsecond, func() error { - return configStream.Send(firstCfgMessage) - }) - if err != nil { - return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) - } - - // In the background, handle trace configurations that are beamed down - // by the agent, but also reply to it with the applied configuration. - go ae.handleConfigStreaming(configStream) - - return nil -} - -// dialToAgent performs a best case attempt to dial to the agent. -// It retries failed dials with: -// * gRPC dialTimeout of 1s -// * exponential backoff, 5 times with a period of 50ms -// hence in the worst case of (no agent actually available), it -// will take at least: -// (5 * 1s) + ((1<<5)-1) * 0.01 s = 5s + 1.55s = 6.55s -func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { - addr := ae.prepareAgentAddress() - dialOpts := []grpc.DialOption{grpc.WithBlock()} - if ae.canDialInsecure { - dialOpts = append(dialOpts, grpc.WithInsecure()) - } - - var cc *grpc.ClientConn - dialOpts = append(dialOpts, grpc.WithTimeout(1*time.Second)) - dialBackoffWaitPeriod := 50 * time.Millisecond - err := nTriesWithExponentialBackoff(5, dialBackoffWaitPeriod, func() error { - var err error - cc, err = grpc.Dial(addr, dialOpts...) - return err - }) - return cc, err -} - -func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { - for { - recv, err := configStream.Recv() - if err != nil { - // TODO: Check if this is a transient error or exponential backoff-able. - return err - } - cfg := recv.Config - if cfg == nil { - continue - } - - // Otherwise now apply the trace configuration sent down from the agent - if psamp := cfg.GetProbabilitySampler(); psamp != nil { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) - } else if csamp := cfg.GetConstantSampler(); csamp != nil { - alwaysSample := csamp.Decision == true - if alwaysSample { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - } else { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) - } - } else { // TODO: Add the rate limiting sampler here - } - - // Then finally send back to upstream the newly applied configuration - err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) - if err != nil { - return err - } - } -} - -var ( - errNotStarted = errors.New("not started") -) - -// Stop shuts down all the connections and resources -// related to the exporter. -func (ae *Exporter) Stop() error { - ae.mu.Lock() - defer ae.mu.Unlock() - - if !ae.started { - return errNotStarted - } - if ae.stopped { - // TODO: tell the user that we've already stopped, so perhaps a sentinel error? - return nil - } - - ae.Flush() - - // Now close the underlying gRPC connection. 
- var err error - if ae.grpcClientConn != nil { - err = ae.grpcClientConn.Close() - } - - // At this point we can change the state variables: started and stopped - ae.started = false - ae.stopped = true - - return err -} - -func (ae *Exporter) ExportSpan(sd *trace.SpanData) { - if sd == nil { - return - } - _ = ae.traceBundler.Add(sd, -1) -} - -func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { - if len(sdl) == 0 { - return - } - protoSpans := make([]*tracepb.Span, 0, len(sdl)) - for _, sd := range sdl { - if sd != nil { - protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) - } - } - - if len(protoSpans) > 0 { - _ = ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ - Spans: protoSpans, - }) - } -} - -func (ae *Exporter) Flush() { - ae.traceBundler.Flush() -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go deleted file mode 100644 index 751f311f1f..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -const ( - DefaultAgentPort uint16 = 55678 - DefaultAgentHost string = "localhost" -) - -type ExporterOption interface { - withExporter(e *Exporter) -} - -type portSetter uint16 - -func (ps portSetter) withExporter(e *Exporter) { - e.agentPort = uint16(ps) -} - -var _ ExporterOption = (*portSetter)(nil) - -type insecureGrpcConnection int - -var _ ExporterOption = (*insecureGrpcConnection)(nil) - -func (igc *insecureGrpcConnection) withExporter(e *Exporter) { - e.canDialInsecure = true -} - -// WithInsecure disables client transport security for the exporter's gRPC connection -// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure -// does. Note, by default, client security is required unless WithInsecure is used. -func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } - -// WithPort allows one to override the port that the exporter will -// connect to the agent on, instead of using DefaultAgentPort. -func WithPort(port uint16) ExporterOption { - return portSetter(port) -} - -type addressSetter string - -func (as addressSetter) withExporter(e *Exporter) { - e.agentAddress = string(as) -} - -var _ ExporterOption = (*addressSetter)(nil) - -// WithAddress allows one to set the address that the exporter will -// connect to the agent on. If unset, it will instead try to use -// connect to DefaultAgentHost:DefaultAgentPort -func WithAddress(addr string) ExporterOption { - return addressSetter(addr) -} - -type serviceNameSetter string - -func (sns serviceNameSetter) withExporter(e *Exporter) { - e.serviceName = string(sns) -} - -var _ ExporterOption = (*serviceNameSetter)(nil) - -// WithServiceName allows one to set/override the service name -// that the exporter will report to the agent. 
-func WithServiceName(serviceName string) ExporterOption { - return serviceNameSetter(serviceName) -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go deleted file mode 100644 index ba7b51d206..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "time" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/tracestate" - - tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - "github.com/golang/protobuf/ptypes/timestamp" -) - -func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { - if sd == nil { - return nil - } - var namePtr *tracepb.TruncatableString - if sd.Name != "" { - namePtr = &tracepb.TruncatableString{Value: sd.Name} - } - return &tracepb.Span{ - TraceId: sd.TraceID[:], - SpanId: sd.SpanID[:], - ParentSpanId: sd.ParentSpanID[:], - Status: ocStatusToProtoStatus(sd.Status), - StartTime: timeToTimestamp(sd.StartTime), - EndTime: timeToTimestamp(sd.EndTime), - Links: ocLinksToProtoLinks(sd.Links), - Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), - Name: namePtr, - Attributes: ocAttributesToProtoAttributes(sd.Attributes), - Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), - } -} - -var blankStatus trace.Status - -func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { - if status == blankStatus { - return nil - } - return &tracepb.Status{ - Code: status.Code, - Message: status.Message, - } -} - -func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { - if len(links) == 0 { - return nil - } - - sl := make([]*tracepb.Span_Link, 0, len(links)) - for _, ocLink := range links { - // This redefinition is necessary to prevent ocLink.*ID[:] copies - // being reused -- in short we need a new ocLink per iteration. 
- ocLink := ocLink - - sl = append(sl, &tracepb.Span_Link{ - TraceId: ocLink.TraceID[:], - SpanId: ocLink.SpanID[:], - Type: ocLinkTypeToProtoLinkType(ocLink.Type), - }) - } - - return &tracepb.Span_Links{ - Link: sl, - } -} - -func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { - switch oct { - case trace.LinkTypeChild: - return tracepb.Span_Link_CHILD_LINKED_SPAN - case trace.LinkTypeParent: - return tracepb.Span_Link_PARENT_LINKED_SPAN - default: - return tracepb.Span_Link_TYPE_UNSPECIFIED - } -} - -func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { - if len(attrs) == 0 { - return nil - } - outMap := make(map[string]*tracepb.AttributeValue) - for k, v := range attrs { - switch v := v.(type) { - case bool: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} - - case int: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} - - case int64: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} - - case string: - outMap[k] = &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: &tracepb.TruncatableString{Value: v}, - }, - } - } - } - return &tracepb.Span_Attributes{ - AttributeMap: outMap, - } -} - -func timeToTimestamp(t time.Time) *timestamp.Timestamp { - nanoTime := t.UnixNano() - return ×tamp.Timestamp{ - Seconds: nanoTime / 1e9, - Nanos: int32(nanoTime % 1e9), - } -} - -func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { - switch kind { - case trace.SpanKindClient: - return tracepb.Span_CLIENT - case trace.SpanKindServer: - return tracepb.Span_SERVER - default: - return tracepb.Span_SPAN_KIND_UNSPECIFIED - } -} - -func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { - if ts == nil { - return nil - } - return &tracepb.Span_Tracestate{ - Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), - } -} - -func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { - protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) - for _, entry := range entries { - protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ - Key: entry.Key, - Value: entry.Value, - }) - } - return protoEntries -} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go deleted file mode 100644 index 68be4c75bd..0000000000 --- a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ocagent - -const Version = "0.0.1" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE deleted file mode 100644 index b9d6a27ea9..0000000000 --- a/vendor/github.com/Azure/go-autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md deleted file mode 100644 index 7b0c4bc4d2..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ /dev/null @@ -1,292 +0,0 @@ -# Azure Active Directory authentication for Go - -This is a standalone package for authenticating with Azure Active -Directory from other Go libraries and applications, in particular the [Azure SDK -for Go](https://github.com/Azure/azure-sdk-for-go). - -Note: Despite the package's name it is not related to other "ADAL" libraries -maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues -should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) -or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue -trackers. - -## Install - -```bash -go get -u github.com/Azure/go-autorest/autorest/adal -``` - -## Usage - -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). - -### Register an Azure AD Application with secret - - -1. Register a new application with a `secret` credential - - ``` - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --password secret - ``` - -2. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "Application ID" - ``` - - * Replace `Application ID` with `appId` from step 1. - -### Register an Azure AD Application with certificate - -1. Create a private key - - ``` - openssl genrsa -out "example-app.key" 2048 - ``` - -2. Create the certificate - - ``` - openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" - openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 - ``` - -3. Create the PKCS12 version of the certificate containing also the private key - - ``` - openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: - - ``` - -4. Register a new application with the certificate content form `example-app.crt` - - ``` - certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" - - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --key-usage Verify --end-date 2018-01-01 \ - --key-value "${certificateContents}" - ``` - -5. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "APPLICATION_ID" - ``` - - * Replace `APPLICATION_ID` with `appId` from step 4. - - -### Grant the necessary permissions - -Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) -which can be assigned to a service principal of an Azure AD application depending of your needs. - -``` -az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" -``` - -* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. 
-* Replace the `ROLE_NAME` with a role name of your choice. - -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. - - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - oauthConfig, - appliationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from previous section. - -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in previous section. - - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - oauthConfig, - applicationID, - resource, - *token, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -#### Username password authenticate - -```Go -spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - oauthConfig, - applicationID, - username, - password, - resource, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -#### Authorization code authenticate - -``` Go -spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - oauthConfig, - applicationID, - clientSecret, - authorizationCode, - redirectURI, - resource, - callbacks...) 
- -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. - -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to pk12/PFC application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of oath token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example acquire a token for `https://management.core.windows.net/` using device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ - -``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index 8c83a917ff..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,91 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/url" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. -func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '" + name + "' cannot be empty") - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant specific urls -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - apiVer := "1.0" - return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) -} - -// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. -// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. 
-func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - api := "" - // it's legal for tenantID to be empty so don't validate it - if apiVersion != nil { - if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { - return nil, err - } - api = fmt.Sprintf("?api-version=%s", *apiVersion) - } - const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index b38f4c2458..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,242 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* - This file is largely based on rjw57/oauth2device's code, with the follow differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // store the following, stored when initiating, used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object return by the token exchange endpoint -// 
It can either look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - 
return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletion(sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - time.Sleep(waitDuration) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 9e15f2751f..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,73 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that maybe accessed by multiple processes. 
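As an illustrative aside, the two persistence helpers in this file are typically paired: load a previously cached token if one exists, fall back to acquiring a fresh one, then write it back. The cache path, the `0600` mode, and the `acquire` callback below are assumptions of this sketch, not values mandated by the package; `Token.IsExpired` comes from `token.go` further down.

```Go
package tokencache

import (
	"github.com/Azure/go-autorest/autorest/adal"
)

// cachedToken returns a token from the on-disk cache when possible, otherwise
// acquires a fresh one via the supplied acquire func and writes it back.
// tokenCachePath and acquire are illustrative stand-ins for a real cache
// location and one of the acquisition flows shown in the README above.
func cachedToken(tokenCachePath string, acquire func() (adal.Token, error)) (adal.Token, error) {
	// Try the on-disk cache first.
	if tok, err := adal.LoadToken(tokenCachePath); err == nil && !tok.IsExpired() {
		return *tok, nil
	}

	// Cache miss or expired token: acquire a fresh one.
	tok, err := acquire()
	if err != nil {
		return adal.Token{}, err
	}

	// Persist it for the next run; 0600 keeps the cache private to the user.
	if err := adal.SaveToken(tokenCachePath, 0600, tok); err != nil {
		return adal.Token{}, err
	}
	return tok, nil
}
```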
-func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index 834401e00d..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,60 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/http" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). 
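Since `SendDecorator` is just a function from `Sender` to `Sender`, a small logging decorator is enough to illustrate the pattern; the logger and the choice to decorate the default client are assumptions of this sketch.

```Go
package senders

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

// withLogging wraps a Sender so every token request is logged before it is sent.
func withLogging(logger *log.Logger) adal.SendDecorator {
	return func(s adal.Sender) adal.Sender {
		return adal.SenderFunc(func(r *http.Request) (*http.Response, error) {
			logger.Printf("adal: %s %s", r.Method, r.URL)
			return s.Do(r)
		})
	}
}

// newLoggingSender builds the default http.Client-backed Sender with the
// logging decorator applied.
func newLoggingSender(logger *log.Logger) adal.Sender {
	return adal.CreateSender(withLogging(logger))
}
```

A sender built this way can be handed to `ServicePrincipalToken.SetSender` so that token refresh traffic goes through the decorated client.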
-func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index effa87ab2f..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,985 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/tracing" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well known endpoint for getting MSI authentications tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// TokenRefreshError is an interface used by errors returned during token refresh. 
-type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. -// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn json.Number `json:"expires_in"` - ExpiresOn json.Number `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -func newToken() Token { - return Token{ - ExpiresIn: "0", - ExpiresOn: "0", - NotBefore: "0", - } -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := t.ExpiresOn.Float64() - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(s) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -//OAuthToken return the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface. -func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. 
-type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} - token.Header["x5c"] = x5c - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - inner servicePrincipalToken - refreshLock *sync.RWMutex - sender Sender - refreshCallbacks []TokenRefreshCallback - // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. - MaxMSIRefreshAttempts int -} - -// MarshalTokenJSON returns the marshalled inner token. -func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { - return json.Marshal(spt.inner.Token) -} - -// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. -func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { - spt.refreshCallbacks = callbacks -} - -// MarshalJSON implements the json.Marshaler interface. -func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { - return json.Marshal(spt.inner) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { - // need to determine the token type - raw := map[string]interface{}{} - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - secret := raw["secret"].(map[string]interface{}) - switch secret["type"] { - case "ServicePrincipalNoSecret": - spt.inner.Secret = &ServicePrincipalNoSecret{} - case "ServicePrincipalTokenSecret": - spt.inner.Secret = &ServicePrincipalTokenSecret{} - case "ServicePrincipalCertificateSecret": - return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") - case "ServicePrincipalMSISecret": - return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") - case "ServicePrincipalUsernamePasswordSecret": - spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} - case "ServicePrincipalAuthorizationCodeSecret": - spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} - default: - return fmt.Errorf("unrecognized token type '%s'", secret["type"]) - } - err = json.Unmarshal(data, &spt.inner) - if err != nil { - return err - } - // Don't override the refreshLock or the sender if those have been already set. - if spt.refreshLock == nil { - spt.refreshLock = &sync.RWMutex{} - } - if spt.sender == nil { - spt.sender = &http.Client{Transport: tracing.Transport} - } - return nil -} - -// internal type used for marshalling/unmarshalling -type servicePrincipalToken struct { - Token Token `json:"token"` - Secret ServicePrincipalSecret `json:"secret"` - OauthConfig OAuthConfig `json:"oauth"` - ClientID string `json:"clientID"` - Resource string `json:"resource"` - AutoRefresh bool `json:"autoRefresh"` - RefreshWithin time.Duration `json:"refreshWithin"` -} - -func validateOAuthConfig(oac OAuthConfig) error { - if oac.IsZero() { - return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") - } - return nil -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
-func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: &http.Client{Transport: tracing.Transport}, - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(username, "username"); err != nil { - return nil, err - } - if err := validateStringParam(password, "password"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalUsernamePasswordSecret{ - Username: username, - Password: password, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the -func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(clientSecret, "clientSecret"); err != nil { - return nil, err - } - if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { - return nil, err - } - if err := validateStringParam(redirectURI, "redirectURI"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalAuthorizationCodeSecret{ - ClientSecret: clientSecret, - AuthorizationCode: authorizationCode, - RedirectURI: redirectURI, - }, - callbacks..., - ) -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the specified user assigned identity when creating the token. -func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) 
-} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != nil { - if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - } - // We set the oauth config token endpoint to be MSI's endpoint - msiEndpointURL, err := url.Parse(msiEndpoint) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("resource", resource) - v.Set("api-version", "2018-02-01") - if userAssignedID != nil { - v.Set("client_id", *userAssignedID) - } - msiEndpointURL.RawQuery = v.Encode() - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{}, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: &http.Client{Transport: tracing.Transport}, - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - if userAssignedID != nil { - spt.inner.ClientID = *userAssignedID - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be syncrhonized. 
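A hedged sketch of how auto-refresh and the callback hook are commonly combined: the callback persists every refreshed token, and `EnsureFresh` is called before each use so a refresh only happens inside the `RefreshWithin` window. The cache path and credentials below are placeholders, not values from this patch.

```Go
package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		log.Fatal(err)
	}

	// Persist every refreshed token so later runs can start from the cache.
	saveCallback := func(t adal.Token) error {
		return adal.SaveToken("/tmp/adal-token.json", 0600, t) // illustrative path
	}

	spt, err := adal.NewServicePrincipalToken(
		*oauthConfig,
		"APPLICATION_ID",
		"APPLICATION_SECRET",
		"https://management.core.windows.net/",
		saveCallback)
	if err != nil {
		log.Fatal(err)
	}

	// EnsureFresh is a no-op while the token is still outside the refresh window.
	if err := spt.EnsureFresh(); err != nil {
		log.Fatal(err)
	}
	log.Println("token is fresh")
}
```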
-func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func isIMDS(u url.URL) bool { - imds, err := url.Parse(msiEndpoint) - if err != nil { - return false - } - return u.Host == imds.Host && u.Path == imds.Path -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) - } - req.Header.Add("User-Agent", UserAgent()) - req = req.WithContext(ctx) - if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - } - - if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - req.Method = http.MethodGet - req.Header.Set(metadataHeader, "true") - } - - var resp *http.Response - if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - resp, err = spt.sender.Do(req) - } - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. 
Error = '%v'", err), nil) - } - - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) - } - - // for the following error cases don't return a TokenRefreshError. the operation succeeded - // but some transient failure happened during deserialization. by returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - var token Token - err = json.Unmarshal(rb, &token) - if err != nil { - return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) - } - - spt.inner.Token = token - - return spt.InvokeRefreshCallbacks(token) -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - for attempt < maxAttempts { - resp, err = sender.Do(req) - // retry on temporary network errors, e.g. transient network failures. - // if we don't receive a response then assume we can't connect to the - // endpoint so we're likely not running on an Azure VM so don't retry. - if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { - return - } - - // perform exponential backoff with a cap. - // must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -// returns true if the specified error is a temporary network error or false if it's not. -// if the error doesn't implement the net.Error interface the return value is true. 
-func isTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// returns true if slice ints contains the value n -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go deleted file mode 100644 index c867b34843..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ /dev/null @@ -1,45 +0,0 @@ -package adal - -import ( - "fmt" - "runtime" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const number = "v1.0.0" - -var ( - ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. -func UserAgent() string { - return ua -} - -// AddToUserAgent adds an extension to the current user agent -func AddToUserAgent(extension string) error { - if extension != "" { - ua = fmt.Sprintf("%s %s", ua, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index bc474b406a..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,260 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/tracing" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" - apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" - bingAPISdkHeader = "X-BingApis-SDK-Client" - golangBingAPISdkHeaderValue = "Go-SDK" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// APIKeyAuthorizer implements API Key authorization. -type APIKeyAuthorizer struct { - headers map[string]interface{} - queryParameters map[string]interface{} -} - -// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(headers, nil) -} - -// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. -func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(nil, queryParameters) -} - -// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { - return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. -func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) - } -} - -// CognitiveServicesAuthorizer implements authorization for Cognitive Services. 
-type CognitiveServicesAuthorizer struct { - subscriptionKey string -} - -// NewCognitiveServicesAuthorizer is -func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { - return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} -} - -// WithAuthorization is -func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[apiKeyAuthorizerHeader] = csa.subscriptionKey - headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BearerAuthorizer implements the bearer authorization -type BearerAuthorizer struct { - tokenProvider adal.OAuthTokenProvider -} - -// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider -func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { - return &BearerAuthorizer{tokenProvider: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the token. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // the ordering is important here, prefer RefresherWithContext if available - if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { - err = refresher.EnsureFresh() - } - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, - "Failed to refresh the Token for request to %s", r.URL) - } - return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) - } - return r, err - }) - } -} - -// BearerAuthorizerCallbackFunc is the authentication callback signature. -type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) - -// BearerAuthorizerCallback implements bearer authorization via a callback. -type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if sender == nil { - sender = &http.Client{Transport: tracing.Transport} - } - return &BearerAuthorizerCallback{sender: sender, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. 
- rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err == nil && resp.StatusCode == 401 { - defer resp.Body.Close() - if hasBearerChallenge(resp) { - bc, err := newBearerChallenge(resp) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return Prepare(r, ba.WithAuthorization()) - } - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(resp *http.Response) bool { - authHeader := resp.Header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} - -// EventGridKeyAuthorizer implements authorization for event grid using key authentication. -type EventGridKeyAuthorizer struct { - topicKey string -} - -// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer -// with the specified topic key. -func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { - return EventGridKeyAuthorizer{topicKey: topicKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. -func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { - headers := map[string]interface{}{ - "aeg-sas-key": egta.topicKey, - } - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index aafdf021fd..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. 
Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp == nil { - return false - } - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. 
-func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. -func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 0041eacf75..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,992 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/tracing" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - req *http.Request // legacy - pt pollingTracker -} - -// NewFuture returns a new Future object initialized with the specified request. -// Deprecated: Please use NewFutureFromResponse instead. 
-func NewFuture(req *http.Request) Future { - return Future{req: req} -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - return Future{pt: pt}, err -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. -func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// Done queries the service to see if the operation has completed. -// Deprecated: Use DoneWithContext() -func (f *Future) Done(sender autorest.Sender) (bool, error) { - return f.DoneWithContext(context.Background(), sender) -} - -// DoneWithContext queries the service to see if the operation has completed. -func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - - // support for legacy Future implementation - if f.req != nil { - resp, err := sender.Do(f.req) - if err != nil { - return false, err - } - pt, err := createPollingTracker(resp) - if err != nil { - return false, err - } - f.pt = pt - f.req = nil - } - // end legacy - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(ctx, sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. -func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletion will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. 
-// Deprecated: Please use WaitForCompletionRef() instead. -func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { - return f.WaitForCompletionRef(ctx, client) -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// If no deadline is specified in the context then the client.PollingDuration will be -// used to determine if a default deadline should be used. -// If PollingDuration is greater than zero the value will be used as the context's timeout. -// If PollingDuration is zero then no default deadline will be used. -func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - cancelCtx := ctx - // if the provided context already has a deadline don't override it - _, hasDeadline := ctx.Deadline() - if d := client.PollingDuration; !hasDeadline && d != 0 { - var cancel context.CancelFunc - cancelCtx, cancel = context.WithTimeout(ctx, d) - defer cancel() - } - - done, err := f.DoneWithContext(ctx, client) - for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { - if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") - } - // we want delayAttempt to be zero in the non-error case so - // that DelayForBackoff doesn't perform exponential back-off - var delayAttempt int - var delay time.Duration - if err == nil { - // check for Retry-After delay, if not present use the client's polling delay - var ok bool - delay, ok = f.GetPollingDelay() - if !ok { - delay = client.PollingDelay - } - } else { - // there was an error polling for status so perform exponential - // back-off based on the number of attempts using the client's retry - // duration. update attempts after delayAttempt to avoid off-by-one. - delayAttempt = attempts - delay = client.RetryDuration - attempts++ - } - // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) - if !delayElapsed { - return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") - } - } - return -} - -// MarshalJSON implements the json.Marshaler interface. -func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(f.pt) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (f *Future) UnmarshalJSON(data []byte) error { - // unmarshal into JSON object to determine the tracker type - obj := map[string]interface{}{} - err := json.Unmarshal(data, &obj) - if err != nil { - return err - } - if obj["method"] == nil { - return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") - } - method := obj["method"].(string) - switch strings.ToUpper(method) { - case http.MethodDelete: - f.pt = &pollingTrackerDelete{} - case http.MethodPatch: - f.pt = &pollingTrackerPatch{} - case http.MethodPost: - f.pt = &pollingTrackerPost{} - case http.MethodPut: - f.pt = &pollingTrackerPut{} - default: - return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) - } - // now unmarshal into the tracker - return json.Unmarshal(data, &f.pt) -} - -// PollingURL returns the URL used for retrieving the status of the long-running operation. -func (f Future) PollingURL() string { - if f.pt == nil { - return "" - } - return f.pt.pollingURL() -} - -// GetResult should be called once polling has completed successfully. -// It makes the final GET call to retrieve the resultant payload. -func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { - if f.pt.finalGetURL() == "" { - // we can end up in this situation if the async operation returns a 200 - // with no polling URLs. in that case return the response which should - // contain the JSON payload (only do this for successful terminal cases). - if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { - return lr, nil - } - return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") - } - req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) - if err != nil { - return nil, err - } - return sender.Do(req) -} - -type pollingTracker interface { - // these methods can differ per tracker - - // checks the response headers and status code to determine the polling mechanism - updatePollingMethod() error - - // checks the response for tracker-specific error conditions - checkForErrors() error - - // returns true if provisioning state should be checked - provisioningStateApplicable() bool - - // methods common to all trackers - - // initializes a tracker's polling URL and method, called for each iteration. - // these values can be overridden by each polling tracker as required. 
- initPollingMethod() error - - // initializes the tracker's internal state, call this when the tracker is created - initializeState() error - - // makes an HTTP request to check the status of the LRO - pollForStatus(ctx context.Context, sender autorest.Sender) error - - // updates internal tracker state, call this after each call to pollForStatus - updatePollingState(provStateApl bool) error - - // returns the error response from the service, can be nil - pollingError() error - - // returns the polling method being used - pollingMethod() PollingMethodType - - // returns the state of the LRO as returned from the service - pollingStatus() string - - // returns the URL used for polling status - pollingURL() string - - // returns the URL used for the final GET to retrieve the resource - finalGetURL() string - - // returns true if the LRO is in a terminal state - hasTerminated() bool - - // returns true if the LRO is in a failed terminal state - hasFailed() bool - - // returns true if the LRO is in a successful terminal state - hasSucceeded() bool - - // returns the cached HTTP response after a call to pollForStatus(), can be nil - latestResponse() *http.Response -} - -type pollingTrackerBase struct { - // resp is the last response, either from the submission of the LRO or from polling - resp *http.Response - - // method is the HTTP verb, this is needed for deserialization - Method string `json:"method"` - - // rawBody is the raw JSON response body - rawBody map[string]interface{} - - // denotes if polling is using async-operation or location header - Pm PollingMethodType `json:"pollingMethod"` - - // the URL to poll for status - URI string `json:"pollingURI"` - - // the state of the LRO as returned from the service - State string `json:"lroState"` - - // the URL to GET for the final result - FinalGetURI string `json:"resultURI"` - - // used to hold an error object returned from the service - Err *ServiceError `json:"error,omitempty"` -} - -func (pt *pollingTrackerBase) initializeState() error { - // determine the initial polling state based on response body and/or HTTP status - // code. this is applicable to the initial LRO response, not polling responses! 
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty - if len(b) == 0 { - return nil - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - - req = req.WithContext(ctx) - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. -func (pt *pollingTrackerBase) updateErrorFromResponse() { - var err error - if pt.resp.ContentLength != 0 { - type respErr struct { - ServiceError *ServiceError `json:"error"` - } - re := respErr{} - defer pt.resp.Body.Close() - var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { - goto Default - } - if err = json.Unmarshal(b, &re); err != nil { - goto Default - } - // unmarshalling the error didn't yield anything, try unwrapped error - if re.ServiceError == nil { - err = json.Unmarshal(b, &re.ServiceError) - if err != nil { - goto Default - } - } - // the unmarshaller will ensure re.ServiceError is non-nil - // even if there was no content unmarshalled so check the code. 
- if re.ServiceError.Code != "" { - pt.Err = re.ServiceError - return - } - } -Default: - se := &ServiceError{ - Code: pt.pollingStatus(), - Message: "The async operation failed.", - } - if err != nil { - se.InnerError = make(map[string]interface{}) - se.InnerError["unmarshalError"] = err.Error() - } - // stick the response body into the error object in hopes - // it contains something useful to help diagnose the failure. - if len(pt.rawBody) > 0 { - se.AdditionalInfo = []map[string]interface{}{ - pt.rawBody, - } - } - pt.Err = se -} - -func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { - if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { - pt.State = pt.rawBody["status"].(string) - } else { - if pt.resp.StatusCode == http.StatusAccepted { - pt.State = operationInProgress - } else if provStateApl { - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationSucceeded - } - } else { - return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") - } - } - // if the operation has failed update the error state - if pt.hasFailed() { - pt.updateErrorFromResponse() - } - return nil -} - -func (pt pollingTrackerBase) pollingError() error { - if pt.Err == nil { - return nil - } - return pt.Err -} - -func (pt pollingTrackerBase) pollingMethod() PollingMethodType { - return pt.Pm -} - -func (pt pollingTrackerBase) pollingStatus() string { - return pt.State -} - -func (pt pollingTrackerBase) pollingURL() string { - return pt.URI -} - -func (pt pollingTrackerBase) finalGetURL() string { - return pt.FinalGetURI -} - -func (pt pollingTrackerBase) hasTerminated() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) hasFailed() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) -} - -func (pt pollingTrackerBase) hasSucceeded() bool { - return strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) latestResponse() *http.Response { - return pt.resp -} - -// error checking common to all trackers -func (pt pollingTrackerBase) baseCheckForErrors() error { - // for Azure-AsyncOperations the response body cannot be nil or empty - if pt.Pm == PollingAsyncOperation { - if pt.resp.Body == nil || pt.resp.ContentLength == 0 { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") - } - if pt.rawBody["status"] == nil { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") - } - } - return nil -} - -// default initialization of polling URL/method. each verb tracker will update this as required. 
-func (pt *pollingTrackerBase) initPollingMethod() error { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - return nil - } - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh != "" { - pt.URI = lh - pt.Pm = PollingLocation - return nil - } - // it's ok if we didn't find a polling header, this will be handled elsewhere - return nil -} - -// DELETE - -type pollingTrackerDelete struct { - pollingTrackerBase -} - -func (pt *pollingTrackerDelete) updatePollingMethod() error { - // for 201 the Location header is required - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - } - pt.Pm = PollingLocation - pt.FinalGetURI = pt.URI - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerDelete) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerDelete) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PATCH - -type pollingTrackerPatch struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPatch) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - // note the absence of the "final GET" mechanism for PATCH - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - if ao == "" { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } else { - pt.URI = lh - pt.Pm = PollingLocation - } - } - } - return nil -} - -func (pt 
pollingTrackerPatch) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPatch) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// POST - -type pollingTrackerPost struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPost) updatePollingMethod() error { - // 201 requires Location header - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - pt.FinalGetURI = lh - pt.Pm = PollingLocation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPost) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPost) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PUT - -type pollingTrackerPut struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPut) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. 
- if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPut) checkForErrors() error { - err := pt.baseCheckForErrors() - if err != nil { - return err - } - // if there are no LRO headers then the body cannot be empty - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } - lh, err := getURLFromLocationHeader(pt.resp) - if err != nil { - return err - } - if ao == "" && lh == "" && len(pt.rawBody) == 0 { - return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") - } - return nil -} - -func (pt pollingTrackerPut) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// creates a polling tracker based on the verb of the original request -func createPollingTracker(resp *http.Response) (pollingTracker, error) { - var pt pollingTracker - switch strings.ToUpper(resp.Request.Method) { - case http.MethodDelete: - pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPatch: - pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPost: - pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPut: - pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} - default: - return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) - } - if err := pt.initializeState(); err != nil { - return pt, err - } - // this initializes the polling header values, we do this during creation in case the - // initial response send us invalid values; this way the API call will return a non-nil - // error (not doing this means the error shows up in Future.Done) - return pt, pt.updatePollingMethod() -} - -// gets the polling URL from the Azure-AsyncOperation header. -// ensures the URL is well-formed and absolute. -func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// gets the polling URL from the Location header. -// ensures the URL is well-formed and absolute. -func getURLFromLocationHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// verify that the URL is valid and absolute -func isValidURL(s string) bool { - u, err := url.Parse(s) - return err == nil && u.IsAbs() -} - -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via -// the context associated with the http.Request. 
-// Deprecated: Prefer using Futures to allow for non-blocking async operations. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - return resp, err - } - if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { - return resp, nil - } - future, err := NewFutureFromResponse(resp) - if err != nil { - return resp, err - } - // retry until either the LRO completes or we receive an error - var done bool - for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { - // check for Retry-After delay, if not present use the specified polling delay - if pd, ok := future.GetPollingDelay(); ok { - delay = pd - } - // wait until the delay elapses or the context is cancelled - if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { - return future.Response(), - autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") - } - } - return future.Response(), err - }) - } -} - -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string - -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. - PollingAsyncOperation PollingMethodType = "AsyncOperation" - - // PollingLocation indicates the polling method uses the Location header. - PollingLocation PollingMethodType = "Location" - - // PollingRequestURI indicates the polling method uses the original request URI. - PollingRequestURI PollingMethodType = "RequestURI" - - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) - -// AsyncOpIncompleteError is the type that's returned from a future that has not completed. -type AsyncOpIncompleteError struct { - // FutureType is the name of the type composed of a azure.Future. - FutureType string -} - -// Error returns an error message including the originating type name of the error. -func (e AsyncOpIncompleteError) Error() string { - return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) -} - -// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. -func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { - return AsyncOpIncompleteError{ - FutureType: futureType, - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index 3a0a439ff9..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package azure provides Azure-specific implementations used with AutoRest. -// See the included examples for more detail. -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. - HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// ServiceError encapsulates the error response from an Azure service. -// It adhears to the OData v4 specification for error responses. -type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` -} - -func (se ServiceError) Error() string { - result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) - - if se.Target != nil { - result += fmt.Sprintf(" Target=%q", *se.Target) - } - - if se.Details != nil { - d, err := json.Marshal(se.Details) - if err != nil { - result += fmt.Sprintf(" Details=%v", se.Details) - } - result += fmt.Sprintf(" Details=%v", string(d)) - } - - if se.InnerError != nil { - d, err := json.Marshal(se.InnerError) - if err != nil { - result += fmt.Sprintf(" InnerError=%v", se.InnerError) - } - result += fmt.Sprintf(" InnerError=%v", string(d)) - } - - if se.AdditionalInfo != nil { - d, err := json.Marshal(se.AdditionalInfo) - if err != nil { - result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) - } - result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) - } - - return result -} - -// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. -func (se *ServiceError) UnmarshalJSON(b []byte) error { - // per the OData v4 spec the details field must be an array of JSON objects. - // unfortunately not all services adhear to the spec and just return a single - // object instead of an array with one object. so we have to perform some - // shenanigans to accommodate both cases. 
- // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 - - type serviceError1 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - type serviceError2 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - se1 := serviceError1{} - err := json.Unmarshal(b, &se1) - if err == nil { - se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) - return nil - } - - se2 := serviceError2{} - err = json.Unmarshal(b, &se2) - if err == nil { - se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) - se.Details = append(se.Details, se2.Details) - return nil - } - return err -} - -func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { - se.Code = code - se.Message = message - se.Target = target - se.Details = details - se.InnerError = inner - se.AdditionalInfo = additional -} - -// RequestError describes an error response returned by Azure service. -type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// Resource contains details about an Azure resource. -type Resource struct { - SubscriptionID string - ResourceGroup string - Provider string - ResourceType string - ResourceName string -} - -// ParseResourceID parses a resource ID into a ResourceDetails struct. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. -func ParseResourceID(resourceID string) (Resource, error) { - - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` - resourceIDPattern := regexp.MustCompile(resourceIDPatternText) - match := resourceIDPattern.FindStringSubmatch(resourceID) - - if len(match) == 0 { - return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) - } - - v := strings.Split(match[5], "/") - resourceName := v[len(v)-1] - - result := Resource{ - SubscriptionID: match[1], - ResourceGroup: match[2], - Provider: match[3], - ResourceType: match[4], - ResourceName: resourceName, - } - - return result, nil -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. 
-func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. -func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. -func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - // Copy and replace the Body in case it does not contain an error object. 
- // This will leave the Body available to the caller. - b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { - return err - } - } - if e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body as raw JSON in hopes of getting something. - rawBody := map[string]interface{}{} - if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { - return err - } - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 7e41f7fd99..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,191 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -// EnvironmentFilepathName captures the name of the environment variable containing the path to the file -// to be used while populating the Azure Environment. -const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// Environment represents a set of endpoints for each of Azure's Clouds. 
-type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - TokenAudience string `json:"tokenAudience"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - StorageEndpointSuffix: "core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.azure.com/", - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - 
ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.usgovcloudapi.net/", - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.chinacloudapi.cn/", - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - ContainerRegistryDNSSuffix: "azurecr.io", - TokenAudience: "https://management.microsoftazure.de/", - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. - if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. 
-// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. -func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e95cf..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... - EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... 
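For reviewers skimming the removed vendor code: the environments.go hunk above carried the static endpoint tables for Azure's clouds. A minimal usage sketch of its lookup API follows; it is illustrative only, not part of this patch, and assumes the go-autorest version vendored prior to this change.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// EnvironmentFromName matches case-insensitively against the table in
	// the removed environments.go ("AZUREPUBLICCLOUD", "AZURECHINACLOUD", ...).
	env, err := azure.EnvironmentFromName("AzurePublicCloud")
	if err != nil {
		fmt.Println("unknown cloud:", err)
		return
	}
	fmt.Println(env.ResourceManagerEndpoint) // https://management.azure.com/

	// "AZURESTACKCLOUD" is special-cased: the definition is loaded with
	// EnvironmentFromFile from the JSON file named by AZURE_ENVIRONMENT_FILEPATH.
}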
- EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... - EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... - EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. -func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties []OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case 
EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 86ce9f2b5b..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
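Before the rp.go removal continues below: metadata_environment.go, removed above, built an Environment for Azure Stack from a resource-manager metadata endpoint. A hedged sketch of that call; the endpoint URL and override value are invented, and running it performs a live GET against <endpoint>/metadata/endpoints?api-version=1.0.

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	env, err := azure.EnvironmentFromURL(
		"https://management.region.example.com/", // hypothetical Azure Stack ARM endpoint
		// Caller-supplied overrides take priority over values discovered from metadata.
		azure.OverrideProperty{Key: azure.EnvironmentName, Value: "ExampleStack"},
	)
	if err != nil {
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println(env.Name, env.TokenAudience)
}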
-// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - var re RequestError - err = autorest.Respond( - resp, - autorest.ByUnmarshallingJSON(&re), - ) - if err != nil { - return resp, err - } - err = re - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - regErr := register(client, r, re) - if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) - } - } - } - return resp, err - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0]["target"].(string), nil - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, this sections are easier to mantain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState *string `json:"registrationState,omitempty"` - } - var provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - registrationStartTime := time.Now() - 
for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index 4874e6e82d..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,270 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/http/cookiejar" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. - DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries. 
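Before the client.go hunk continues: rp.go, removed above, layered automatic resource-provider registration onto request retries. A sketch of attaching it as a SendDecorator; the subscription ID and URL are placeholders, and the snippet issues a real HTTP call if run.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	client := autorest.NewClientWithUserAgent("example-agent")

	// Placeholder ARM URL; the zeroed GUID stands in for a real subscription ID.
	req, err := http.NewRequest(http.MethodPut,
		"https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/demo?api-version=2016-09-01",
		nil)
	if err != nil {
		fmt.Println(err)
		return
	}

	// On a 409 carrying the code "MissingSubscriptionRegistration", the decorator
	// registers the missing resource provider and retries the original request.
	resp, err := autorest.SendWithSender(client, req, azure.DoRetryWithRegistration(client))
	fmt.Println(resp, err)
}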
- DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status code for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. -// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. 
-type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - // Setting this to zero will use the provided context to control the duration. - PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: UserAgent(), - } - c.Sender = c.sender() - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. - resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(), r) - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. -func (c Client) sender() Sender { - if c.Sender == nil { - j, _ := cookiejar.New(nil) - client := &http.Client{Jar: j, Transport: tracing.Transport} - return client - } - - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. 
-func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c457106568..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). 
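client.go, removed above, is the base type that generated SDK clients build on. A minimal, hedged sketch of constructing one and wiring the logging inspectors it defines; the user agent, logger destination, and local test server are arbitrary choices made for the example.

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Local stand-in for an ARM endpoint so the sketch needs no credentials or network.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"ok":true}`)
	}))
	defer srv.Close()

	c := autorest.NewClientWithUserAgent("capo-example")

	// LoggingInspector dumps the full request and response; intended for tracing
	// JSON bodies, not streaming payloads.
	li := autorest.LoggingInspector{Logger: log.New(os.Stderr, "autorest: ", log.LstdFlags)}
	c.RequestInspector = li.WithInspection()
	c.ResponseInspector = li.ByInspecting()

	req, _ := http.NewRequest(http.MethodPost, srv.URL, strings.NewReader(`{"ping":"pong"}`))
	resp, err := c.Do(req) // no Authorizer set, so the NullAuthorizer is used
	fmt.Println(resp.StatusCode, err)
}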
-func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad049..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). 
-func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39ba9b..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
-func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959b2a..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. -func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) 
-func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0ebb..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index f724f33327..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,98 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
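The removed date package wraps time.Time so that Azure's wire formats (full dates, RFC1123, Unix timestamps) round-trip through encoding/json. A short sketch of the Date and UnixTime types shown above, with arbitrary values:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// Date marshals as an RFC3339 full-date string.
	d, err := date.ParseDate("2019-08-23")
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "2019-08-23"

	// UnixTime unmarshals from a JSON number of seconds since the epoch
	// (decoded through float64, so sub-second precision may be lost).
	var u date.UnixTime
	if err := json.Unmarshal([]byte("1566540320"), &u); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(u).UTC()) // the decoded instant, printed in UTC
}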
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses a error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // Service Error is the response body of failed API in bytes - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). 
-func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 6d67bd7337..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,480 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. -// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. 
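error.go, removed above, defines DetailedError, the error type the autorest helpers wrap failures in. A brief sketch of constructing and inspecting one; the package and method names are invented for the example.

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp := &http.Response{StatusCode: http.StatusNotFound}

	// Captures package type, method, status code, and a printf-style message.
	err := autorest.NewErrorWithResponse("examplepkg", "GetWidget", resp, "widget %q not found", "frobnicator")
	fmt.Println(err.Error())
	// examplepkg#GetWidget: widget "frobnicator" not found: StatusCode=404

	// Wrapping an existing DetailedError returns it unchanged.
	same := autorest.NewErrorWithError(err, "otherpkg", "Call", nil, "ignored")
	fmt.Println(same.Method) // GetWidget
}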
-// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. -func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. -func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. 
-func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. -func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. 
-func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. -func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. 
-func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. -func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). 
-func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - - v := r.URL.Query() - for key, value := range parameters { - d, err := url.QueryUnescape(value) - if err != nil { - return r, err - } - v.Add(key, d) - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index a908a0adb7..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,250 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). 
-func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. -func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. 
-func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. 
-func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbed79..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request. -func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 7143cc61b5..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. 
-func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index ae15c6bf96..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 6665d7c006..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,326 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "log" - "math" - "net/http" - "strconv" - "time" - - "github.com/Azure/go-autorest/tracing" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. -func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Context().Done()) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. 
-func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) { - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. 
-func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt := 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - // if the error isn't temporary don't bother retrying - if err != nil && !IsTemporaryNetworkError(err) { - return nil, err - } - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // don't count a 429 against the number of attempts - // so that we continue to retry until it succeeds - if resp == nil || resp.StatusCode != http.StatusTooManyRequests { - attempt++ - } - } - return resp, err - }) - } -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in -// responses with status code 429 -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) - if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { - select { - case <-time.After(time.Duration(retryAfter) * time.Second): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. 
-func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): - return true - case <-cancel: - return false - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index 08cf11c118..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,228 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. 
-func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. -func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. -func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// AsStringSlice method converts interface{} to []string. This expects a -//that the parameter passed to be a slice or array of a type that has the underlying -//type a string. -func AsStringSlice(s interface{}) ([]string, error) { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.") - } - stringSlice := make([]string, 0, v.Len()) - - for i := 0; i < v.Len(); i++ { - stringSlice = append(stringSlice, v.Index(i).String()) - } - return stringSlice, nil -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using the separator. Note that only sep[0] will be used for -// joining if any separator is specified. 
-func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if ok == false { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result. -func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. -func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 6230b73961..0000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,41 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -const number = "v11.5.0" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} - -// Version returns the semantic version (see http://semver.org). 
-func Version() string { - return number -} diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index da09f394c5..0000000000 --- a/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,328 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. -func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. 
- URL func(u *url.URL) string - - // Header returns a potentially modified set of values for the specified key. - // To completely exclude the header key/values return false. - Header func(key string, val []string) (bool, []string) - - // Body returns a potentially modified request/response body. - Body func(b []byte) []byte -} - -func (f Filter) processURL(u *url.URL) string { - if f.URL == nil { - return u.String() - } - return f.URL(u) -} - -func (f Filter) processHeader(k string, val []string) (bool, []string) { - if f.Header == nil { - return true, val - } - return f.Header(k, val) -} - -func (f Filter) processBody(b []byte) []byte { - if f.Body == nil { - return b - } - return f.Body(b) -} - -// Writer defines methods for writing to a logging facility. -type Writer interface { - // Writeln writes the specified message with the standard log entry header and new-line character. - Writeln(level LevelType, message string) - - // Writef writes the specified format specifier with the standard log entry header and no new-line character. - Writef(level LevelType, format string, a ...interface{}) - - // WriteRequest writes the specified HTTP request to the logger if the log level is greater than - // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no request content is excluded. - WriteRequest(req *http.Request, filter Filter) - - // WriteResponse writes the specified HTTP response to the logger if the log level is greater than - // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no response content is excluded. - WriteResponse(resp *http.Response, filter Filter) -} - -// Instance is the default log writer initialized during package init. -// This can be replaced with a custom implementation as required. -var Instance Writer - -// default log level -var logLevel = LogNone - -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. -// If no value was specified the default value is LogNone. -// Custom loggers can call this to retrieve the configured log level. 
-func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. -type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to 
read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go deleted file mode 100644 index cd61cb18b6..0000000000 --- a/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ /dev/null @@ -1,190 +0,0 @@ -package tracing - -// Copyright 2018 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "fmt" - "net/http" - "os" - - "contrib.go.opencensus.io/exporter/ocagent" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" -) - -var ( - // Transport is the default tracing RoundTripper. The custom options setter will control - // if traces are being emitted or not. - Transport = &ochttp.Transport{ - Propagation: &tracecontext.HTTPFormat{}, - GetStartOptions: getStartOptions, - } - - // enabled is the flag for marking if tracing is enabled. - enabled = false - - // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise - // it will be using the parent sampler or the default. - sampler = trace.NeverSample() - - // Views for metric instrumentation. - views = map[string]*view.View{} - - // the trace exporter - traceExporter trace.Exporter -) - -func init() { - enableFromEnv() -} - -func enableFromEnv() { - _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") - _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") - if ok || legacyOk { - agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") - - if ok { - EnableWithAIForwarding(agentEndpoint) - } else { - Enable() - } - } -} - -// IsEnabled returns true if monitoring is enabled for the sdk. -func IsEnabled() bool { - return enabled -} - -// Enable will start instrumentation for metrics and traces. -func Enable() error { - enabled = true - sampler = nil - - err := initStats() - return err -} - -// Disable will disable instrumentation for metrics and traces. 
-func Disable() { - disableStats() - sampler = trace.NeverSample() - if traceExporter != nil { - trace.UnregisterExporter(traceExporter) - } - enabled = false -} - -// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder -// exporter making the metrics and traces available in app insights. -func EnableWithAIForwarding(agentEndpoint string) (err error) { - err = Enable() - if err != nil { - return err - } - - traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint)) - if err != nil { - return err - } - trace.RegisterExporter(traceExporter) - return -} - -// getStartOptions is the custom options setter for the ochttp package. -func getStartOptions(*http.Request) trace.StartOptions { - return trace.StartOptions{ - Sampler: sampler, - } -} - -// initStats registers the views for the http metrics -func initStats() (err error) { - clientViews := []*view.View{ - ochttp.ClientCompletedCount, - ochttp.ClientRoundtripLatencyDistribution, - ochttp.ClientReceivedBytesDistribution, - ochttp.ClientSentBytesDistribution, - } - for _, cv := range clientViews { - vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name) - views[vn] = cv.WithName(vn) - err = view.Register(views[vn]) - if err != nil { - return err - } - } - return -} - -// disableStats will unregister the previously registered metrics -func disableStats() { - for _, v := range views { - view.Unregister(v) - } -} - -// StartSpan starts a trace span -func StartSpan(ctx context.Context, name string) context.Context { - ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler)) - return ctx -} - -// EndSpan ends a previously started span stored in the context -func EndSpan(ctx context.Context, httpStatusCode int, err error) { - span := trace.FromContext(ctx) - - if span == nil { - return - } - - if err != nil { - span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)}) - } - span.End() -} - -// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined -// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status -func toTraceStatusCode(httpStatusCode int) int32 { - switch { - case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest: - return trace.StatusCodeOK - case httpStatusCode == http.StatusBadRequest: - return trace.StatusCodeInvalidArgument - case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated. 
- return trace.StatusCodeUnauthenticated - case httpStatusCode == http.StatusForbidden: - return trace.StatusCodePermissionDenied - case httpStatusCode == http.StatusNotFound: - return trace.StatusCodeNotFound - case httpStatusCode == http.StatusTooManyRequests: - return trace.StatusCodeResourceExhausted - case httpStatusCode == 499: - return trace.StatusCodeCancelled - case httpStatusCode == http.StatusNotImplemented: - return trace.StatusCodeUnimplemented - case httpStatusCode == http.StatusServiceUnavailable: - return trace.StatusCodeUnavailable - case httpStatusCode == http.StatusGatewayTimeout: - return trace.StatusCodeDeadlineExceeded - default: - return trace.StatusCodeUnknown - } -} diff --git a/vendor/github.com/appscode/jsonpatch/.gitignore b/vendor/github.com/appscode/jsonpatch/.gitignore deleted file mode 100644 index 0e9448e052..0000000000 --- a/vendor/github.com/appscode/jsonpatch/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea/ diff --git a/vendor/github.com/appscode/jsonpatch/.travis.yml b/vendor/github.com/appscode/jsonpatch/.travis.yml deleted file mode 100644 index 92f2439d74..0000000000 --- a/vendor/github.com/appscode/jsonpatch/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.x - - tip - -env: - - GO111MODULE=on - -script: - - go test -v diff --git a/vendor/github.com/appscode/jsonpatch/README.md b/vendor/github.com/appscode/jsonpatch/README.md deleted file mode 100644 index bbbf72921f..0000000000 --- a/vendor/github.com/appscode/jsonpatch/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# jsonpatch - -[![Build Status](https://travis-ci.org/appscode/jsonpatch.svg?branch=master)](https://travis-ci.org/appscode/jsonpatch) -[![Go Report Card](https://goreportcard.com/badge/appscode/jsonpatch "Go Report Card")](https://goreportcard.com/report/appscode/jsonpatch) -[![GoDoc](https://godoc.org/github.com/appscode/jsonpatch?status.svg "GoDoc")](https://godoc.org/github.com/appscode/jsonpatch) - -As per http://jsonpatch.com JSON Patch is specified in RFC 6902 from the IETF. - -JSON Patch allows you to generate JSON that describes changes you want to make to a document, so you don't have to send the whole doc. JSON Patch format is supported by HTTP PATCH method, allowing for standards based partial updates via REST APIs. - -```console -go get github.com/appscode/jsonpatch -``` - -I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and -generate format like jsonpatch.com specifies. 
Here's an example of the patch format: - -```json -[ - { "op": "replace", "path": "/baz", "value": "boo" }, - { "op": "add", "path": "/hello", "value": ["world"] }, - { "op": "remove", "path": "/foo"} -] - -``` -The API is super simple - -## example - -```go -package main - -import ( - "fmt" - "github.com/appscode/jsonpatch" -) - -var simpleA = `{"a":100, "b":200, "c":"hello"}` -var simpleB = `{"a":100, "b":200, "c":"goodbye"}` - -func main() { - patch, e := jsonpatch.CreatePatch([]byte(simpleA), []byte(simpleA)) - if e != nil { - fmt.Printf("Error creating JSON patch:%v", e) - return - } - for _, operation := range patch { - fmt.Printf("%s\n", operation.Json()) - } -} -``` - -This code needs more tests, as it's a highly recursive, type-fiddly monster. It's not a lot of code, but it has to deal with a lot of complexity. diff --git a/vendor/github.com/appscode/jsonpatch/go.mod b/vendor/github.com/appscode/jsonpatch/go.mod deleted file mode 100644 index 458d129ec9..0000000000 --- a/vendor/github.com/appscode/jsonpatch/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/appscode/jsonpatch - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/evanphx/json-patch v4.0.0+incompatible - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 -) diff --git a/vendor/github.com/appscode/jsonpatch/go.sum b/vendor/github.com/appscode/jsonpatch/go.sum deleted file mode 100644 index 0972e0e1a9..0000000000 --- a/vendor/github.com/appscode/jsonpatch/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS deleted file mode 100644 index e068e731ea..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. \ No newline at end of file diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go deleted file mode 100644 index 2f12e428ed..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go +++ /dev/null @@ -1,356 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/common/v1/common.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LibraryInfo_Language int32 - -const ( - LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 - LibraryInfo_CPP LibraryInfo_Language = 1 - LibraryInfo_C_SHARP LibraryInfo_Language = 2 - LibraryInfo_ERLANG LibraryInfo_Language = 3 - LibraryInfo_GO_LANG LibraryInfo_Language = 4 - LibraryInfo_JAVA LibraryInfo_Language = 5 - LibraryInfo_NODE_JS LibraryInfo_Language = 6 - LibraryInfo_PHP LibraryInfo_Language = 7 - LibraryInfo_PYTHON LibraryInfo_Language = 8 - LibraryInfo_RUBY LibraryInfo_Language = 9 -) - -var LibraryInfo_Language_name = map[int32]string{ - 0: "LANGUAGE_UNSPECIFIED", - 1: "CPP", - 2: "C_SHARP", - 3: "ERLANG", - 4: "GO_LANG", - 5: "JAVA", - 6: "NODE_JS", - 7: "PHP", - 8: "PYTHON", - 9: "RUBY", -} - -var LibraryInfo_Language_value = map[string]int32{ - "LANGUAGE_UNSPECIFIED": 0, - "CPP": 1, - "C_SHARP": 2, - "ERLANG": 3, - "GO_LANG": 4, - "JAVA": 5, - "NODE_JS": 6, - "PHP": 7, - "PYTHON": 8, - "RUBY": 9, -} - -func (x LibraryInfo_Language) String() string { - return proto.EnumName(LibraryInfo_Language_name, int32(x)) -} - -func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{2, 0} -} - -// Identifier metadata of the Node (Application instrumented with OpenCensus) -// that connects to OpenCensus Agent. -// In the future we plan to extend the identifier proto definition to support -// additional information (e.g cloud id, etc.) -type Node struct { - // Identifier that uniquely identifies a process within a VM/container. - Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - // Information on the OpenCensus Library that initiates the stream. - LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` - // Additional information on service. - ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` - // Additional attributes. 
- Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{0} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetIdentifier() *ProcessIdentifier { - if m != nil { - return m.Identifier - } - return nil -} - -func (m *Node) GetLibraryInfo() *LibraryInfo { - if m != nil { - return m.LibraryInfo - } - return nil -} - -func (m *Node) GetServiceInfo() *ServiceInfo { - if m != nil { - return m.ServiceInfo - } - return nil -} - -func (m *Node) GetAttributes() map[string]string { - if m != nil { - return m.Attributes - } - return nil -} - -// Identifier that uniquely identifies a process within a VM/container. -type ProcessIdentifier struct { - // The host name. Usually refers to the machine/container name. - // For example: os.Hostname() in Go, socket.gethostname() in Python. - HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Process id. - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - // Start time of this ProcessIdentifier. Represented in epoch time. 
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } -func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } -func (*ProcessIdentifier) ProtoMessage() {} -func (*ProcessIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{1} -} - -func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) -} -func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) -} -func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessIdentifier.Merge(m, src) -} -func (m *ProcessIdentifier) XXX_Size() int { - return xxx_messageInfo_ProcessIdentifier.Size(m) -} -func (m *ProcessIdentifier) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) -} - -var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo - -func (m *ProcessIdentifier) GetHostName() string { - if m != nil { - return m.HostName - } - return "" -} - -func (m *ProcessIdentifier) GetPid() uint32 { - if m != nil { - return m.Pid - } - return 0 -} - -func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { - if m != nil { - return m.StartTimestamp - } - return nil -} - -// Information on OpenCensus Library. -type LibraryInfo struct { - // Language of OpenCensus Library. - Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` - // Version of Agent exporter of Library. - ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` - // Version of OpenCensus Library. 
- CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } -func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } -func (*LibraryInfo) ProtoMessage() {} -func (*LibraryInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{2} -} - -func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) -} -func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) -} -func (m *LibraryInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LibraryInfo.Merge(m, src) -} -func (m *LibraryInfo) XXX_Size() int { - return xxx_messageInfo_LibraryInfo.Size(m) -} -func (m *LibraryInfo) XXX_DiscardUnknown() { - xxx_messageInfo_LibraryInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo - -func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { - if m != nil { - return m.Language - } - return LibraryInfo_LANGUAGE_UNSPECIFIED -} - -func (m *LibraryInfo) GetExporterVersion() string { - if m != nil { - return m.ExporterVersion - } - return "" -} - -func (m *LibraryInfo) GetCoreLibraryVersion() string { - if m != nil { - return m.CoreLibraryVersion - } - return "" -} - -// Additional service information. -type ServiceInfo struct { - // Name of the service. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } -func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } -func (*ServiceInfo) ProtoMessage() {} -func (*ServiceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{3} -} - -func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) -} -func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) -} -func (m *ServiceInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceInfo.Merge(m, src) -} -func (m *ServiceInfo) XXX_Size() int { - return xxx_messageInfo_ServiceInfo.Size(m) -} -func (m *ServiceInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo - -func (m *ServiceInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func init() { - proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) - proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") - proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") - proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") - proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) -} - 
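// Illustrative sketch of how a caller (for example an OpenCensus agent
// exporter) could populate the generated Node type defined above. The import
// aliases, the exporter version string, and the service name are assumed
// placeholders for illustration; only the types and fields shown above
// (Node, ProcessIdentifier, LibraryInfo, ServiceInfo) are taken as given.
package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/ptypes"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
)

func main() {
	hostname, _ := os.Hostname()

	// Identify this process to the agent: host/pid/start time, the
	// instrumentation library, and the logical service name.
	node := &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName:       hostname,
			Pid:            uint32(os.Getpid()),
			StartTimestamp: ptypes.TimestampNow(),
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language:        commonpb.LibraryInfo_GO_LANG,
			ExporterVersion: "0.0.1", // assumed placeholder version
		},
		ServiceInfo: &commonpb.ServiceInfo{Name: "example-service"}, // assumed placeholder name
	}

	fmt.Println(node.String())
}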
-var fileDescriptor_126c72ed8a252c84 = []byte{ - // 590 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e, - 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee, - 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01, - 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde, - 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9, - 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2, - 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89, - 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9, - 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7, - 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39, - 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b, - 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b, - 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13, - 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06, - 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d, - 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67, - 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2, - 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a, - 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a, - 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76, - 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23, - 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c, - 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c, - 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92, - 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e, - 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51, - 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14, - 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83, - 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0, - 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86, - 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4, - 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd, - 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9, - 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c, - 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70, - 0x85, 0xec, 0xd8, 0x9f, 
0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1, - 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go deleted file mode 100644 index 01eddf4cc1..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/trace/v1/trace_service.proto - -package v1 - -import ( - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - proto "github.com/golang/protobuf/proto" - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type CurrentLibraryConfig struct { - // This is required only in the first message on the stream or if the - // previous sent CurrentLibraryConfig message has a different Node (e.g. - // when the same RPC is used to configure multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // Current configuration. - Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } -func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } -func (*CurrentLibraryConfig) ProtoMessage() {} -func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{0} -} - -func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) -} -func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) -} -func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) -} -func (m *CurrentLibraryConfig) XXX_Size() int { - return xxx_messageInfo_CurrentLibraryConfig.Size(m) -} -func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { - xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo - -func (m *CurrentLibraryConfig) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { - if m != nil { - return m.Config - } - return nil -} - -type UpdatedLibraryConfig struct { - // This field is ignored when the RPC is used to configure only one Application. 
- // This is required only in the first message on the stream or if the - // previous sent UpdatedLibraryConfig message has a different Node. - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // Requested updated configuration. - Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } -func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } -func (*UpdatedLibraryConfig) ProtoMessage() {} -func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{1} -} - -func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) -} -func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) -} -func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) -} -func (m *UpdatedLibraryConfig) XXX_Size() int { - return xxx_messageInfo_UpdatedLibraryConfig.Size(m) -} -func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { - xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo - -func (m *UpdatedLibraryConfig) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { - if m != nil { - return m.Config - } - return nil -} - -type ExportTraceServiceRequest struct { - // This is required only in the first message on the stream or if the - // previous sent ExportTraceServiceRequest message has a different Node (e.g. - // when the same RPC is used to send Spans from multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // A list of Spans that belong to the last received Node. - Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` - // The resource for the spans in this message that do not have an explicit - // resource set. - // If unset, the most recently set resource in the RPC stream applies. It is - // valid to never be set within a stream, e.g. when no resource info is known. 
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } -func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceRequest) ProtoMessage() {} -func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{2} -} - -func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) -} -func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) -} -func (m *ExportTraceServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceRequest.Size(m) -} -func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo - -func (m *ExportTraceServiceRequest) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { - if m != nil { - return m.Spans - } - return nil -} - -func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { - if m != nil { - return m.Resource - } - return nil -} - -type ExportTraceServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } -func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceResponse) ProtoMessage() {} -func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{3} -} - -func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) -} -func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) -} -func (m *ExportTraceServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceResponse.Size(m) -} -func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") - proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") - proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") - proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", 
fileDescriptor_7027f99caf7ac6a5) -} - -var fileDescriptor_7027f99caf7ac6a5 = []byte{ - // 423 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40, - 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a, - 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8, - 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3, - 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d, - 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49, - 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91, - 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d, - 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7, - 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf, - 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73, - 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1, - 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31, - 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95, - 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d, - 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90, - 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d, - 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d, - 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89, - 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44, - 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e, - 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae, - 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59, - 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94, - 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00, - 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, - 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. 
- Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) -} - -type traceServiceClient struct { - cc *grpc.ClientConn -} - -func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) - if err != nil { - return nil, err - } - x := &traceServiceConfigClient{stream} - return x, nil -} - -type TraceService_ConfigClient interface { - Send(*CurrentLibraryConfig) error - Recv() (*UpdatedLibraryConfig, error) - grpc.ClientStream -} - -type traceServiceConfigClient struct { - grpc.ClientStream -} - -func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { - m := new(UpdatedLibraryConfig) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) - if err != nil { - return nil, err - } - x := &traceServiceExportClient{stream} - return x, nil -} - -type TraceService_ExportClient interface { - Send(*ExportTraceServiceRequest) error - Recv() (*ExportTraceServiceResponse, error) - grpc.ClientStream -} - -type traceServiceExportClient struct { - grpc.ClientStream -} - -func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { - m := new(ExportTraceServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. - Config(TraceService_ConfigServer) error - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(TraceService_ExportServer) error -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) -} - -type TraceService_ConfigServer interface { - Send(*UpdatedLibraryConfig) error - Recv() (*CurrentLibraryConfig, error) - grpc.ServerStream -} - -type traceServiceConfigServer struct { - grpc.ServerStream -} - -func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { - m := new(CurrentLibraryConfig) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) -} - -type TraceService_ExportServer interface { - Send(*ExportTraceServiceResponse) error - Recv() (*ExportTraceServiceRequest, error) - grpc.ServerStream -} - -type traceServiceExportServer struct { - grpc.ServerStream -} - -func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { - m := new(ExportTraceServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Config", - Handler: _TraceService_Config_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Export", - Handler: _TraceService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go deleted file mode 100644 index 560dbd94a0..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/resource/v1/resource.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Resource information. -type Resource struct { - // Type identifier for the resource. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Set of labels that describe the resource. 
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_584700775a2fc762, []int{0} -} - -func (m *Resource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Resource.Unmarshal(m, b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return xxx_messageInfo_Resource.Size(m) -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Resource) GetLabels() map[string]string { - if m != nil { - return m.Labels - } - return nil -} - -func init() { - proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") -} - -func init() { - proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) -} - -var fileDescriptor_584700775a2fc762 = []byte{ - // 234 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, - 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, - 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, - 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, - 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, - 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, - 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, - 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, - 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, - 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf, - 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19, - 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5, - 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99, - 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go deleted file mode 100644 index c34e388922..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go +++ /dev/null @@ 
-1,1654 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/trace/v1/trace.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. - Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SERVER Span_SpanKind = 1 - // Indicates that the span covers the client-side wrapper around an RPC or - // other remote request. - Span_CLIENT Span_SpanKind = 2 -) - -var Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SERVER", - 2: "CLIENT", -} - -var Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SERVER": 1, - "CLIENT": 2, -} - -func (x Span_SpanKind) String() string { - return proto.EnumName(Span_SpanKind_name, int32(x)) -} - -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0} -} - -// Indicates whether the message was sent or received. -type Span_TimeEvent_MessageEvent_Type int32 - -const ( - // Unknown event type. - Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 - // Indicates a sent message. - Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 - // Indicates a received message. - Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 -) - -var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "SENT", - 2: "RECEIVED", -} - -var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "SENT": 1, - "RECEIVED": 2, -} - -func (x Span_TimeEvent_MessageEvent_Type) String() string { - return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) -} - -func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} -} - -// The relationship of the current span relative to the linked span: child, -// parent, or unspecified. -type Span_Link_Type int32 - -const ( - // The relationship of the two spans is unknown, or known but other - // than parent-child. - Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 - // The linked span is a child of the current span. - Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 - // The linked span is a parent of the current span. 
- Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 -) - -var Span_Link_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "CHILD_LINKED_SPAN", - 2: "PARENT_LINKED_SPAN", -} - -var Span_Link_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "CHILD_LINKED_SPAN": 1, - "PARENT_LINKED_SPAN": 2, -} - -func (x Span_Link_Type) String() string { - return proto.EnumName(Span_Link_Type_name, int32(x)) -} - -func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} -} - -// A span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Often, a trace contains a root span -// that describes the end-to-end latency, and one or more subspans for -// its sub-operations. A trace can also contain multiple root spans, -// or none at all. Spans do not need to be contiguous - there may be -// gaps or overlaps between spans in a trace. -// -// The next id is 16. -// TODO(bdrutu): Add an example. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. - // - // This field is required. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. - // - // This field is required. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The Tracestate on the span. - Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` - // A description of the span's operation. - // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is required. - Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` - // and `SERVER` to identify queueing latency associated with the span. - Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // The start time of the span. On the client side, this is the time kept by - // the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // The end time of the span. On the client side, this is the time kept by - // the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // A set of attributes on the span. 
- Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` - // A stack trace captured at the start of the span. - StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` - // The included time events. - TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` - // The included links. - Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` - // An optional final status for this span. - Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` - // A highly recommended but not required flag that identifies when a trace - // crosses a process boundary. True when the parent_span belongs to the - // same process as the current span. - SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` - // An optional number of child spans that were generated while this span - // was active. If set, allows an implementation to detect missing child spans. - ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0} -} - -func (m *Span) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span.Unmarshal(m, b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { - return xxx_messageInfo_Span.Size(m) -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span) GetTracestate() *Span_Tracestate { - if m != nil { - return m.Tracestate - } - return nil -} - -func (m *Span) GetParentSpanId() []byte { - if m != nil { - return m.ParentSpanId - } - return nil -} - -func (m *Span) GetName() *TruncatableString { - if m != nil { - return m.Name - } - return nil -} - -func (m *Span) GetKind() Span_SpanKind { - if m != nil { - return m.Kind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -func (m *Span) GetStartTime() *timestamp.Timestamp { - if m != nil { - return m.StartTime - } - return nil -} - -func (m *Span) GetEndTime() *timestamp.Timestamp { - if m != nil { - return m.EndTime - } - return nil -} - -func (m *Span) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span) GetStackTrace() *StackTrace { - if m != nil { - return m.StackTrace - } - return nil -} - -func (m *Span) GetTimeEvents() *Span_TimeEvents { - if m != nil { - return m.TimeEvents - } - return nil -} - -func (m *Span) GetLinks() *Span_Links { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetStatus() 
*Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { - if m != nil { - return m.SameProcessAsParentSpan - } - return nil -} - -func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { - if m != nil { - return m.ChildSpanCount - } - return nil -} - -// This field conveys information about request position in multiple distributed tracing graphs. -// It is a list of Tracestate.Entry with a maximum of 32 members in the list. -// -// See the https://github.com/w3c/distributed-tracing for more details about this field. -type Span_Tracestate struct { - // A list of entries that represent the Tracestate. - Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} } -func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) } -func (*Span_Tracestate) ProtoMessage() {} -func (*Span_Tracestate) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0} -} - -func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b) -} -func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic) -} -func (m *Span_Tracestate) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Tracestate.Merge(m, src) -} -func (m *Span_Tracestate) XXX_Size() int { - return xxx_messageInfo_Span_Tracestate.Size(m) -} -func (m *Span_Tracestate) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Tracestate.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo - -func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry { - if m != nil { - return m.Entries - } - return nil -} - -type Span_Tracestate_Entry struct { - // The key must begin with a lowercase letter, and can only contain - // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes - // '-', asterisks '*', and forward slashes '/'. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // The value is opaque string up to 256 characters printable ASCII - // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. - // Note that this also excludes tabs, newlines, carriage returns, etc. 
- Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } -func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } -func (*Span_Tracestate_Entry) ProtoMessage() {} -func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} -} - -func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) -} -func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) -} -func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) -} -func (m *Span_Tracestate_Entry) XXX_Size() int { - return xxx_messageInfo_Span_Tracestate_Entry.Size(m) -} -func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo - -func (m *Span_Tracestate_Entry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Span_Tracestate_Entry) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// A set of attributes, each with a key and a value. -type Span_Attributes struct { - // The set of attributes. The value can be a string, an integer, or the - // Boolean values `true` and `false`. For example: - // - // "/instance_id": "my-instance" - // "/http/user_agent": "" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The number of attributes that were discarded. Attributes can be discarded - // because their keys are too long or because there are too many attributes. - // If this value is 0, then no attributes were dropped. 
- DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } -func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } -func (*Span_Attributes) ProtoMessage() {} -func (*Span_Attributes) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 1} -} - -func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) -} -func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) -} -func (m *Span_Attributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Attributes.Merge(m, src) -} -func (m *Span_Attributes) XXX_Size() int { - return xxx_messageInfo_Span_Attributes.Size(m) -} -func (m *Span_Attributes) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Attributes.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo - -func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { - if m != nil { - return m.AttributeMap - } - return nil -} - -func (m *Span_Attributes) GetDroppedAttributesCount() int32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A time-stamped annotation or message event in the Span. -type Span_TimeEvent struct { - // The time the event occurred. - Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` - // A `TimeEvent` can contain either an `Annotation` object or a - // `MessageEvent` object, but not both. 
- // - // Types that are valid to be assigned to Value: - // *Span_TimeEvent_Annotation_ - // *Span_TimeEvent_MessageEvent_ - Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } -func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent) ProtoMessage() {} -func (*Span_TimeEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2} -} - -func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) -} -func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent.Merge(m, src) -} -func (m *Span_TimeEvent) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent.Size(m) -} -func (m *Span_TimeEvent) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo - -func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { - if m != nil { - return m.Time - } - return nil -} - -type isSpan_TimeEvent_Value interface { - isSpan_TimeEvent_Value() -} - -type Span_TimeEvent_Annotation_ struct { - Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` -} - -type Span_TimeEvent_MessageEvent_ struct { - MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` -} - -func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} - -func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} - -func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { - if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { - return x.Annotation - } - return nil -} - -func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { - if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { - return x.MessageEvent - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{ - (*Span_TimeEvent_Annotation_)(nil), - (*Span_TimeEvent_MessageEvent_)(nil), - } -} - -func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Span_TimeEvent) - // value - switch x := m.Value.(type) { - case *Span_TimeEvent_Annotation_: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Annotation); err != nil { - return err - } - case *Span_TimeEvent_MessageEvent_: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.MessageEvent); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x) - } - return nil -} - -func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Span_TimeEvent) - switch tag { - case 2: // value.annotation - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Span_TimeEvent_Annotation) - err := b.DecodeMessage(msg) - m.Value = &Span_TimeEvent_Annotation_{msg} - return true, err - case 3: // value.message_event - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Span_TimeEvent_MessageEvent) - err := b.DecodeMessage(msg) - m.Value = &Span_TimeEvent_MessageEvent_{msg} - return true, err - default: - return false, nil - } -} - -func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Span_TimeEvent) - // value - switch x := m.Value.(type) { - case *Span_TimeEvent_Annotation_: - s := proto.Size(x.Annotation) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *Span_TimeEvent_MessageEvent_: - s := proto.Size(x.MessageEvent) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// A text annotation with a set of attributes. -type Span_TimeEvent_Annotation struct { - // A user-supplied message describing the event. - Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // A set of attributes on the annotation. 
- Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } -func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent_Annotation) ProtoMessage() {} -func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} -} - -func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) -} -func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) -} -func (m *Span_TimeEvent_Annotation) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) -} -func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo - -func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { - if m != nil { - return m.Description - } - return nil -} - -func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// An event describing a message sent/received between Spans. -type Span_TimeEvent_MessageEvent struct { - // The type of MessageEvent. Indicates whether the message was sent or - // received. - Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` - // An identifier for the MessageEvent's message that can be used to match - // SENT and RECEIVED MessageEvents. For example, this field could - // represent a sequence ID for a streaming RPC. It is recommended to be - // unique within a Span. - Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // The number of uncompressed bytes sent or received. - UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` - // The number of compressed bytes sent or received. If zero, assumed to - // be the same size as uncompressed. 
- CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } -func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} -func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} -} - -func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) -} -func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo - -func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { - if m != nil { - return m.Type - } - return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED -} - -func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { - if m != nil { - return m.UncompressedSize - } - return 0 -} - -func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { - if m != nil { - return m.CompressedSize - } - return 0 -} - -// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation -// on the span, consisting of either user-supplied key-value pairs, or -// details of a message sent/received between Spans. -type Span_TimeEvents struct { - // A collection of `TimeEvent`s. - TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` - // The number of dropped annotations in all the included time events. - // If the value is 0, then no annotations were dropped. - DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` - // The number of dropped message events in all the included time events. - // If the value is 0, then no message events were dropped. 
- DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } -func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvents) ProtoMessage() {} -func (*Span_TimeEvents) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 3} -} - -func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) -} -func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvents.Merge(m, src) -} -func (m *Span_TimeEvents) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvents.Size(m) -} -func (m *Span_TimeEvents) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo - -func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { - if m != nil { - return m.TimeEvent - } - return nil -} - -func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { - if m != nil { - return m.DroppedAnnotationsCount - } - return 0 -} - -func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { - if m != nil { - return m.DroppedMessageEventsCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. -type Span_Link struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The relationship of the current span relative to the linked span. - Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` - // A set of attributes on the link. 
- Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Link) Reset() { *m = Span_Link{} } -func (m *Span_Link) String() string { return proto.CompactTextString(m) } -func (*Span_Link) ProtoMessage() {} -func (*Span_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 4} -} - -func (m *Span_Link) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Link.Unmarshal(m, b) -} -func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) -} -func (m *Span_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Link.Merge(m, src) -} -func (m *Span_Link) XXX_Size() int { - return xxx_messageInfo_Span_Link.Size(m) -} -func (m *Span_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Link proto.InternalMessageInfo - -func (m *Span_Link) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span_Link) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span_Link) GetType() Span_Link_Type { - if m != nil { - return m.Type - } - return Span_Link_TYPE_UNSPECIFIED -} - -func (m *Span_Link) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// A collection of links, which are references from this span to a span -// in the same or different trace. -type Span_Links struct { - // A collection of links. - Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` - // The number of dropped links after the maximum size was enforced. If - // this value is 0, then no links were dropped. - DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Links) Reset() { *m = Span_Links{} } -func (m *Span_Links) String() string { return proto.CompactTextString(m) } -func (*Span_Links) ProtoMessage() {} -func (*Span_Links) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 5} -} - -func (m *Span_Links) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Links.Unmarshal(m, b) -} -func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) -} -func (m *Span_Links) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Links.Merge(m, src) -} -func (m *Span_Links) XXX_Size() int { - return xxx_messageInfo_Span_Links.Size(m) -} -func (m *Span_Links) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Links.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Links proto.InternalMessageInfo - -func (m *Span_Links) GetLink() []*Span_Link { - if m != nil { - return m.Link - } - return nil -} - -func (m *Span_Links) GetDroppedLinksCount() int32 { - if m != nil { - return m.DroppedLinksCount - } - return 0 -} - -// The `Status` type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. 
This proto's fields -// are a subset of those of -// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), -// which is used by [gRPC](https://github.com/grpc). -type Status struct { - // The status code. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{1} -} - -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -// The value of an Attribute. -type AttributeValue struct { - // The type of the value. - // - // Types that are valid to be assigned to Value: - // *AttributeValue_StringValue - // *AttributeValue_IntValue - // *AttributeValue_BoolValue - // *AttributeValue_DoubleValue - Value isAttributeValue_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttributeValue) Reset() { *m = AttributeValue{} } -func (m *AttributeValue) String() string { return proto.CompactTextString(m) } -func (*AttributeValue) ProtoMessage() {} -func (*AttributeValue) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{2} -} - -func (m *AttributeValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributeValue.Unmarshal(m, b) -} -func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) -} -func (m *AttributeValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributeValue.Merge(m, src) -} -func (m *AttributeValue) XXX_Size() int { - return xxx_messageInfo_AttributeValue.Size(m) -} -func (m *AttributeValue) XXX_DiscardUnknown() { - xxx_messageInfo_AttributeValue.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributeValue proto.InternalMessageInfo - -type isAttributeValue_Value interface { - isAttributeValue_Value() -} - -type AttributeValue_StringValue struct { - StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type AttributeValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` -} - -type AttributeValue_BoolValue struct { - BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type AttributeValue_DoubleValue struct { - DoubleValue float64 
`protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -func (*AttributeValue_StringValue) isAttributeValue_Value() {} - -func (*AttributeValue_IntValue) isAttributeValue_Value() {} - -func (*AttributeValue_BoolValue) isAttributeValue_Value() {} - -func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} - -func (m *AttributeValue) GetValue() isAttributeValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *AttributeValue) GetStringValue() *TruncatableString { - if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { - return x.StringValue - } - return nil -} - -func (m *AttributeValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *AttributeValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *AttributeValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{ - (*AttributeValue_StringValue)(nil), - (*AttributeValue_IntValue)(nil), - (*AttributeValue_BoolValue)(nil), - (*AttributeValue_DoubleValue)(nil), - } -} - -func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*AttributeValue) - // value - switch x := m.Value.(type) { - case *AttributeValue_StringValue: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.StringValue); err != nil { - return err - } - case *AttributeValue_IntValue: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *AttributeValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(3<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *AttributeValue_DoubleValue: - b.EncodeVarint(4<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case nil: - default: - return fmt.Errorf("AttributeValue.Value has unexpected type %T", x) - } - return nil -} - -func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*AttributeValue) - switch tag { - case 1: // value.string_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(TruncatableString) - err := b.DecodeMessage(msg) - m.Value = &AttributeValue_StringValue{msg} - return true, err - case 2: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &AttributeValue_IntValue{int64(x)} - return true, err - case 3: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &AttributeValue_BoolValue{x != 0} - return true, err - case 4: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &AttributeValue_DoubleValue{math.Float64frombits(x)} - return true, err - default: - return false, nil - } -} - -func _AttributeValue_OneofSizer(msg 
proto.Message) (n int) { - m := msg.(*AttributeValue) - // value - switch x := m.Value.(type) { - case *AttributeValue_StringValue: - s := proto.Size(x.StringValue) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *AttributeValue_IntValue: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.IntValue)) - case *AttributeValue_BoolValue: - n += 1 // tag and wire - n += 1 - case *AttributeValue_DoubleValue: - n += 1 // tag and wire - n += 8 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// The call stack which originated this span. -type StackTrace struct { - // Stack frames in this stack trace. - StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` - // The hash ID is used to conserve network bandwidth for duplicate - // stack traces within a single trace. - // - // Often multiple spans will have identical stack traces. - // The first occurrence of a stack trace should contain both - // `stack_frames` and a value in `stack_trace_hash_id`. - // - // Subsequent spans within the same request can refer - // to that stack trace by setting only `stack_trace_hash_id`. - // - // TODO: describe how to deal with the case where stack_trace_hash_id is - // zero because it was not set. - StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace) Reset() { *m = StackTrace{} } -func (m *StackTrace) String() string { return proto.CompactTextString(m) } -func (*StackTrace) ProtoMessage() {} -func (*StackTrace) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3} -} - -func (m *StackTrace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace.Unmarshal(m, b) -} -func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) -} -func (m *StackTrace) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace.Merge(m, src) -} -func (m *StackTrace) XXX_Size() int { - return xxx_messageInfo_StackTrace.Size(m) -} -func (m *StackTrace) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace proto.InternalMessageInfo - -func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { - if m != nil { - return m.StackFrames - } - return nil -} - -func (m *StackTrace) GetStackTraceHashId() uint64 { - if m != nil { - return m.StackTraceHashId - } - return 0 -} - -// A single stack frame in a stack trace. -type StackTrace_StackFrame struct { - // The fully-qualified name that uniquely identifies the function or - // method that is active in this frame. - FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` - // An un-mangled function name, if `function_name` is - // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can - // be fully qualified. - OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` - // The name of the source file where the function call appears. 
- FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` - // The line number in `file_name` where the function call appears. - LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` - // The column number where the function call appears, if available. - // This is important in JavaScript because of its anonymous functions. - ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` - // The binary module from where the code was loaded. - LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` - // The version of the deployed source code. - SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } -func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrame) ProtoMessage() {} -func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3, 0} -} - -func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) -} -func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) -} -func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) -} -func (m *StackTrace_StackFrame) XXX_Size() int { - return xxx_messageInfo_StackTrace_StackFrame.Size(m) -} -func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo - -func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { - if m != nil { - return m.FunctionName - } - return nil -} - -func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { - if m != nil { - return m.OriginalFunctionName - } - return nil -} - -func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { - if m != nil { - return m.FileName - } - return nil -} - -func (m *StackTrace_StackFrame) GetLineNumber() int64 { - if m != nil { - return m.LineNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetColumnNumber() int64 { - if m != nil { - return m.ColumnNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetLoadModule() *Module { - if m != nil { - return m.LoadModule - } - return nil -} - -func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { - if m != nil { - return m.SourceVersion - } - return nil -} - -// A collection of stack frames, which can be truncated. -type StackTrace_StackFrames struct { - // Stack frames in this call stack. - Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` - // The number of stack frames that were dropped because there - // were too many stack frames. - // If this value is 0, then no stack frames were dropped. 
- DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } -func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrames) ProtoMessage() {} -func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3, 1} -} - -func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) -} -func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) -} -func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) -} -func (m *StackTrace_StackFrames) XXX_Size() int { - return xxx_messageInfo_StackTrace_StackFrames.Size(m) -} -func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo - -func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { - if m != nil { - return m.Frame - } - return nil -} - -func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { - if m != nil { - return m.DroppedFramesCount - } - return 0 -} - -// A description of a binary module. -type Module struct { - // TODO: document the meaning of this field. - // For example: main binary, kernel modules, and dynamic libraries - // such as libc.so, sharedlib.so. - Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` - // A unique identifier for the module, usually a hash of its - // contents. - BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Module) Reset() { *m = Module{} } -func (m *Module) String() string { return proto.CompactTextString(m) } -func (*Module) ProtoMessage() {} -func (*Module) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{4} -} - -func (m *Module) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Module.Unmarshal(m, b) -} -func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Module.Marshal(b, m, deterministic) -} -func (m *Module) XXX_Merge(src proto.Message) { - xxx_messageInfo_Module.Merge(m, src) -} -func (m *Module) XXX_Size() int { - return xxx_messageInfo_Module.Size(m) -} -func (m *Module) XXX_DiscardUnknown() { - xxx_messageInfo_Module.DiscardUnknown(m) -} - -var xxx_messageInfo_Module proto.InternalMessageInfo - -func (m *Module) GetModule() *TruncatableString { - if m != nil { - return m.Module - } - return nil -} - -func (m *Module) GetBuildId() *TruncatableString { - if m != nil { - return m.BuildId - } - return nil -} - -// A string that might be shortened to a specified length. -type TruncatableString struct { - // The shortened string. For example, if the original string was 500 bytes long and - // the limit of the string was 128 bytes, then this value contains the first 128 - // bytes of the 500-byte string. 
Note that truncation always happens on a - // character boundary, to ensure that a truncated string is still valid UTF-8. - // Because it may contain multi-byte characters, the size of the truncated string - // may be less than the truncation limit. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // The number of bytes removed from the original string. If this - // value is 0, then the string was not shortened. - TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TruncatableString) Reset() { *m = TruncatableString{} } -func (m *TruncatableString) String() string { return proto.CompactTextString(m) } -func (*TruncatableString) ProtoMessage() {} -func (*TruncatableString) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{5} -} - -func (m *TruncatableString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TruncatableString.Unmarshal(m, b) -} -func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) -} -func (m *TruncatableString) XXX_Merge(src proto.Message) { - xxx_messageInfo_TruncatableString.Merge(m, src) -} -func (m *TruncatableString) XXX_Size() int { - return xxx_messageInfo_TruncatableString.Size(m) -} -func (m *TruncatableString) XXX_DiscardUnknown() { - xxx_messageInfo_TruncatableString.DiscardUnknown(m) -} - -var xxx_messageInfo_TruncatableString proto.InternalMessageInfo - -func (m *TruncatableString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *TruncatableString) GetTruncatedByteCount() int32 { - if m != nil { - return m.TruncatedByteCount - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) - proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) - proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) - proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") - proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") - proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") - proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") - proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") - proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") - proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") - proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") - proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") - proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") - proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") - proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") - proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") - 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") - proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") - proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") - proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") - proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") -} - -func init() { - proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) -} - -var fileDescriptor_8ea38bbb821bf584 = []byte{ - // 1524 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5b, 0x6f, 0x1b, 0xb7, - 0x12, 0xf6, 0xea, 0xae, 0x91, 0xac, 0xc8, 0x8c, 0x93, 0xc8, 0x3a, 0x39, 0x27, 0x3e, 0x4a, 0x82, - 0xe3, 0x9c, 0xd6, 0x72, 0xe2, 0xa4, 0x41, 0xae, 0x48, 0x7d, 0x91, 0x2b, 0xc5, 0x8e, 0xaa, 0x50, - 0x8a, 0xd1, 0x0b, 0x8a, 0xc5, 0x4a, 0x4b, 0xcb, 0x5b, 0x4b, 0xdc, 0xed, 0x92, 0xeb, 0xc2, 0x79, - 0xeb, 0x53, 0x51, 0xf4, 0xad, 0x40, 0xd1, 0x3f, 0xd0, 0x87, 0xfe, 0x9d, 0xa2, 0xbf, 0xa3, 0xbf, - 0xa0, 0x2f, 0x05, 0xc9, 0xbd, 0xc9, 0x49, 0x6c, 0x55, 0x79, 0x31, 0xb8, 0xe4, 0x7c, 0x1f, 0x39, - 0x9c, 0x6f, 0x86, 0x63, 0xc1, 0x4d, 0xdb, 0x21, 0x74, 0x40, 0x28, 0xf3, 0xd8, 0x9a, 0xe3, 0xda, - 0xdc, 0x5e, 0xe3, 0xae, 0x31, 0x20, 0x6b, 0xc7, 0x77, 0xd4, 0xa0, 0x2e, 0x27, 0xd1, 0x52, 0x64, - 0xa6, 0x66, 0xea, 0x6a, 0xf5, 0xf8, 0x4e, 0xf5, 0xda, 0xd0, 0xb6, 0x87, 0x23, 0xa2, 0xd0, 0x7d, - 0xef, 0x60, 0x8d, 0x5b, 0x63, 0xc2, 0xb8, 0x31, 0x76, 0x94, 0x65, 0xf5, 0x3f, 0xa7, 0x0d, 0xbe, - 0x75, 0x0d, 0xc7, 0x21, 0xae, 0xcf, 0x54, 0xfb, 0xee, 0x12, 0xa4, 0xba, 0x8e, 0x41, 0xd1, 0x12, - 0xe4, 0x24, 0xab, 0x6e, 0x99, 0x15, 0x6d, 0x59, 0x5b, 0x29, 0xe2, 0xac, 0xfc, 0x6e, 0x99, 0xe8, - 0x0a, 0x64, 0x99, 0x63, 0x50, 0xb1, 0x92, 0x90, 0x2b, 0x19, 0xf1, 0xd9, 0x32, 0xd1, 0x73, 0x00, - 0x69, 0xc3, 0xb8, 0xc1, 0x49, 0xe5, 0xc2, 0xb2, 0xb6, 0x52, 0x58, 0xff, 0x7f, 0xfd, 0x9d, 0xa7, - 0xad, 0x8b, 0x8d, 0xea, 0xbd, 0x10, 0x81, 0x63, 0x68, 0x74, 0x03, 0x4a, 0x8e, 0xe1, 0x12, 0xca, - 0xf5, 0x60, 0xaf, 0xa4, 0xdc, 0xab, 0xa8, 0x66, 0xbb, 0x6a, 0xc7, 0x8f, 0x21, 0x45, 0x8d, 0x31, - 0xa9, 0xa4, 0xe4, 0x5e, 0x1f, 0x9e, 0xb1, 0x57, 0xcf, 0xf5, 0xe8, 0xc0, 0xe0, 0x46, 0x7f, 0x44, - 0xba, 0xdc, 0xb5, 0xe8, 0x10, 0x4b, 0x24, 0x7a, 0x02, 0xa9, 0x23, 0x8b, 0x9a, 0x95, 0xd2, 0xb2, - 0xb6, 0x52, 0x5a, 0x5f, 0x39, 0xef, 0xb4, 0xe2, 0xcf, 0xae, 0x45, 0x4d, 0x2c, 0x51, 0xe8, 0x21, - 0x00, 0xe3, 0x86, 0xcb, 0x75, 0x71, 0xcf, 0x95, 0xb4, 0x3c, 0x45, 0xb5, 0xae, 0xee, 0xb8, 0x1e, - 0xdc, 0x71, 0xbd, 0x17, 0x04, 0x01, 0xe7, 0xa5, 0xb5, 0xf8, 0x46, 0x1f, 0x41, 0x8e, 0x50, 0x53, - 0x01, 0x33, 0xe7, 0x02, 0xb3, 0x84, 0x9a, 0x12, 0xf6, 0x1c, 0xc0, 0xe0, 0xdc, 0xb5, 0xfa, 0x1e, - 0x27, 0xac, 0x92, 0x9d, 0xee, 0x8e, 0x37, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x0e, 0x14, 0x18, 0x37, - 0x06, 0x47, 0xba, 0xb4, 0xae, 0xe4, 0x24, 0xd9, 0xcd, 0xb3, 0xc8, 0x84, 0xb5, 0x0c, 0x18, 0x06, - 0x16, 0x8e, 0xd1, 0x2e, 0x14, 0x84, 0x1b, 0x3a, 0x39, 0x26, 0x94, 0xb3, 0x4a, 0x7e, 0xca, 0xc0, - 0x5b, 0x63, 0xd2, 0x90, 0x08, 0x0c, 0x3c, 0x1c, 0xa3, 0xc7, 0x90, 0x1e, 0x59, 0xf4, 0x88, 0x55, - 0xe0, 0xfc, 0xe3, 0x08, 0x9a, 0x3d, 0x61, 0x8c, 0x15, 0x06, 0x3d, 0x84, 0x8c, 0x90, 0x8f, 0xc7, - 0x2a, 0x05, 0x89, 0xfe, 0xef, 0xd9, 0xce, 0x70, 0x8f, 0x61, 0x1f, 0x80, 0x3e, 0x83, 0x7f, 0x31, - 0x63, 0x4c, 0x74, 0xc7, 0xb5, 0x07, 0x84, 0x31, 0xdd, 0x60, 0x7a, 0x4c, 0x80, 0x95, 0xe2, 0x3b, - 0x42, 0xb4, 0x69, 0xdb, 0xa3, 0x7d, 
0x63, 0xe4, 0x11, 0x7c, 0x45, 0xc0, 0x3b, 0x0a, 0xbd, 0xc1, - 0x3a, 0xa1, 0x4c, 0xd1, 0x0e, 0x94, 0x07, 0x87, 0xd6, 0xc8, 0x54, 0x4a, 0x1e, 0xd8, 0x1e, 0xe5, - 0x95, 0x79, 0x49, 0x77, 0xf5, 0x0d, 0xba, 0x57, 0x2d, 0xca, 0xef, 0xae, 0x2b, 0xc2, 0x92, 0x44, - 0x09, 0x8a, 0x2d, 0x81, 0xa9, 0xfe, 0xa0, 0x01, 0x44, 0xd9, 0x82, 0x9e, 0x43, 0x96, 0x50, 0xee, - 0x5a, 0x84, 0x55, 0xb4, 0xe5, 0xe4, 0x4a, 0x61, 0xfd, 0xf6, 0xf4, 0xa9, 0x56, 0x6f, 0x50, 0xee, - 0x9e, 0xe0, 0x80, 0xa0, 0xba, 0x06, 0x69, 0x39, 0x83, 0xca, 0x90, 0x3c, 0x22, 0x27, 0x32, 0xe3, - 0xf3, 0x58, 0x0c, 0xd1, 0x22, 0xa4, 0x8f, 0xc5, 0x71, 0x64, 0xae, 0xe7, 0xb1, 0xfa, 0xa8, 0xfe, - 0x9c, 0x00, 0x88, 0x54, 0x85, 0x0c, 0x98, 0x0f, 0x75, 0xa5, 0x8f, 0x0d, 0xc7, 0x3f, 0xd1, 0x93, - 0xe9, 0x85, 0x19, 0x0d, 0x5f, 0x18, 0x8e, 0x3a, 0x5d, 0xd1, 0x88, 0x4d, 0xa1, 0x07, 0x50, 0x31, - 0x5d, 0xdb, 0x71, 0x88, 0xa9, 0x47, 0x12, 0xf6, 0x6f, 0x53, 0x1c, 0x2d, 0x8d, 0x2f, 0xfb, 0xeb, - 0x11, 0xa9, 0xba, 0xb7, 0xaf, 0x61, 0xe1, 0x0d, 0xf2, 0xb7, 0x38, 0xfa, 0x2c, 0xee, 0x68, 0x61, - 0xfd, 0xd6, 0x19, 0x67, 0x0f, 0xe9, 0x54, 0xa0, 0x14, 0xee, 0x51, 0xe2, 0x81, 0x56, 0xfd, 0x35, - 0x0d, 0xf9, 0x50, 0xd8, 0xa8, 0x0e, 0x29, 0x99, 0xdf, 0xda, 0xb9, 0xf9, 0x2d, 0xed, 0xd0, 0x3e, - 0x80, 0x41, 0xa9, 0xcd, 0x0d, 0x6e, 0xd9, 0xd4, 0x3f, 0xc7, 0xbd, 0xa9, 0xf3, 0xa8, 0xbe, 0x11, - 0x62, 0x9b, 0x73, 0x38, 0xc6, 0x84, 0xbe, 0x82, 0xf9, 0x31, 0x61, 0xcc, 0x18, 0xfa, 0x39, 0x2a, - 0x6b, 0x69, 0x61, 0xfd, 0xfe, 0xf4, 0xd4, 0x2f, 0x14, 0x5c, 0x7e, 0x34, 0xe7, 0x70, 0x71, 0x1c, - 0xfb, 0xae, 0xfe, 0xa6, 0x01, 0x44, 0x7b, 0xa3, 0x36, 0x14, 0x4c, 0xc2, 0x06, 0xae, 0xe5, 0x48, - 0x37, 0xb4, 0x19, 0x6a, 0x73, 0x9c, 0xe0, 0x54, 0xc9, 0x4b, 0xbc, 0x4f, 0xc9, 0xab, 0xfe, 0xa5, - 0x41, 0x31, 0xee, 0x0b, 0xfa, 0x14, 0x52, 0xfc, 0xc4, 0x51, 0x21, 0x2a, 0xad, 0x3f, 0x9e, 0xed, - 0x46, 0xea, 0xbd, 0x13, 0x87, 0x60, 0x49, 0x84, 0x4a, 0x90, 0xf0, 0x1f, 0xc6, 0x14, 0x4e, 0x58, - 0x26, 0xfa, 0x00, 0x16, 0x3c, 0x3a, 0xb0, 0xc7, 0x8e, 0x4b, 0x18, 0x23, 0xa6, 0xce, 0xac, 0xd7, - 0x44, 0xde, 0x7f, 0x0a, 0x97, 0xe3, 0x0b, 0x5d, 0xeb, 0x35, 0x41, 0xff, 0x83, 0x0b, 0xa7, 0x4d, - 0x53, 0xd2, 0xb4, 0x34, 0x69, 0x58, 0xbb, 0x07, 0x29, 0xb1, 0x27, 0x5a, 0x84, 0x72, 0xef, 0xf3, - 0x4e, 0x43, 0x7f, 0xd5, 0xee, 0x76, 0x1a, 0x5b, 0xad, 0x9d, 0x56, 0x63, 0xbb, 0x3c, 0x87, 0x72, - 0x90, 0xea, 0x36, 0xda, 0xbd, 0xb2, 0x86, 0x8a, 0x90, 0xc3, 0x8d, 0xad, 0x46, 0x6b, 0xbf, 0xb1, - 0x5d, 0x4e, 0x6c, 0x66, 0x7d, 0x89, 0x57, 0xff, 0x10, 0xa5, 0x24, 0xaa, 0xb9, 0x4d, 0x80, 0xa8, - 0x80, 0xfb, 0xb9, 0x7b, 0x6b, 0xea, 0xab, 0xc0, 0xf9, 0xb0, 0x7c, 0xa3, 0x47, 0xb0, 0x14, 0x66, - 0x69, 0xa8, 0x88, 0xc9, 0x34, 0xbd, 0x12, 0xa4, 0x69, 0xb4, 0x2e, 0xf3, 0x14, 0x3d, 0x83, 0xab, - 0x01, 0x76, 0x42, 0xad, 0x01, 0x3c, 0x29, 0xe1, 0x01, 0x7f, 0xfc, 0xfe, 0xfd, 0x44, 0xff, 0x29, - 0x01, 0x29, 0xf1, 0x1c, 0xcc, 0xd4, 0xbc, 0x3c, 0xf5, 0x85, 0x90, 0x94, 0x42, 0xb8, 0x35, 0xcd, - 0xb3, 0x13, 0x0f, 0xfb, 0xa4, 0x48, 0x53, 0xef, 0x23, 0xd2, 0xda, 0xee, 0x99, 0xc1, 0xbd, 0x04, - 0x0b, 0x5b, 0xcd, 0xd6, 0xde, 0xb6, 0xbe, 0xd7, 0x6a, 0xef, 0x36, 0xb6, 0xf5, 0x6e, 0x67, 0xa3, - 0x5d, 0xd6, 0xd0, 0x65, 0x40, 0x9d, 0x0d, 0xdc, 0x68, 0xf7, 0x26, 0xe6, 0x13, 0xd5, 0x6f, 0x20, - 0x2d, 0x9f, 0x48, 0xf4, 0x00, 0x52, 0xe2, 0x91, 0xf4, 0xc3, 0x7b, 0x63, 0x1a, 0x07, 0xb1, 0x44, - 0xa0, 0x3a, 0x5c, 0x0c, 0x02, 0x23, 0x9f, 0xd9, 0x89, 0x70, 0x2e, 0xf8, 0x4b, 0x72, 0x13, 0x19, - 0x87, 0xda, 0x53, 0xc8, 0x05, 0x7d, 0x12, 0x5a, 0x82, 0x4b, 0xe2, 0x20, 0xfa, 0x6e, 0xab, 0xbd, - 0x7d, 0xca, 0x11, 0x80, 0x4c, 0xb7, 0x81, 0xf7, 0x1b, 0xb8, 
0xac, 0x89, 0xf1, 0xd6, 0x5e, 0x4b, - 0x68, 0x36, 0x51, 0xbb, 0x0f, 0x19, 0xf5, 0x36, 0x23, 0x04, 0xa9, 0x81, 0x6d, 0xaa, 0xe4, 0x4c, - 0x63, 0x39, 0x46, 0x15, 0xc8, 0xfa, 0xea, 0xf0, 0x5f, 0xa4, 0xe0, 0xb3, 0xf6, 0xbb, 0x06, 0xa5, - 0xc9, 0xca, 0x8c, 0x5e, 0x42, 0x91, 0xc9, 0x8a, 0xa2, 0xab, 0xd2, 0x3e, 0x43, 0x2d, 0x6a, 0xce, - 0xe1, 0x82, 0xe2, 0x50, 0x94, 0xff, 0x86, 0xbc, 0x45, 0xb9, 0x1e, 0x3d, 0x15, 0xc9, 0xe6, 0x1c, - 0xce, 0x59, 0x94, 0xab, 0xe5, 0x6b, 0x00, 0x7d, 0xdb, 0x1e, 0xf9, 0xeb, 0x42, 0x4c, 0xb9, 0xe6, - 0x1c, 0xce, 0xf7, 0x83, 0x36, 0x01, 0x5d, 0x87, 0xa2, 0x69, 0x7b, 0xfd, 0x11, 0xf1, 0x4d, 0x84, - 0x54, 0x34, 0xb1, 0x89, 0x9a, 0x95, 0x46, 0x61, 0xa2, 0xd6, 0x7e, 0xcc, 0x00, 0x44, 0x5d, 0x17, - 0xea, 0x09, 0x7f, 0x44, 0xc7, 0x76, 0xe0, 0x1a, 0x63, 0xf9, 0xf0, 0x0b, 0x7f, 0xee, 0x4c, 0xd5, - 0xb2, 0xa9, 0xe1, 0x8e, 0x04, 0x62, 0xd5, 0xf8, 0xa9, 0x0f, 0xb4, 0x0a, 0x17, 0x63, 0x7d, 0xa0, - 0x7e, 0x68, 0xb0, 0x43, 0x3d, 0xac, 0x61, 0xe5, 0xa8, 0xd1, 0x6b, 0x1a, 0xec, 0xb0, 0x65, 0x56, - 0xff, 0x4c, 0xfa, 0x67, 0x92, 0x70, 0xf4, 0x12, 0xe6, 0x0f, 0x3c, 0x3a, 0x10, 0x89, 0xac, 0xcb, - 0x66, 0x7c, 0x96, 0x82, 0x5f, 0x0c, 0x28, 0xda, 0x82, 0xb2, 0x0f, 0x97, 0x6d, 0xd7, 0x1a, 0x5a, - 0xd4, 0x18, 0xe9, 0x93, 0xdc, 0x89, 0x19, 0xb8, 0x17, 0x03, 0xae, 0x9d, 0xf8, 0x1e, 0x2d, 0xc8, - 0x1f, 0x58, 0x23, 0xa2, 0x68, 0x93, 0x33, 0xd0, 0xe6, 0x04, 0x5c, 0x52, 0x5d, 0x83, 0xc2, 0xc8, - 0xa2, 0x44, 0xa7, 0xde, 0xb8, 0x4f, 0x5c, 0x19, 0xd1, 0x24, 0x06, 0x31, 0xd5, 0x96, 0x33, 0xe8, - 0x3a, 0xcc, 0x0f, 0xec, 0x91, 0x37, 0xa6, 0x81, 0x49, 0x5a, 0x9a, 0x14, 0xd5, 0xa4, 0x6f, 0xb4, - 0x09, 0x85, 0x91, 0x6d, 0x98, 0xfa, 0xd8, 0x36, 0xbd, 0x51, 0xf0, 0x3f, 0xc1, 0x59, 0x0d, 0xec, - 0x0b, 0x69, 0x88, 0x41, 0xa0, 0xd4, 0x18, 0x75, 0xa1, 0xc4, 0x6c, 0xcf, 0x1d, 0x10, 0xfd, 0x98, - 0xb8, 0x4c, 0xbc, 0xbe, 0xd9, 0x19, 0x3c, 0x9b, 0x57, 0x1c, 0xfb, 0x8a, 0xa2, 0xfa, 0xbd, 0x06, - 0x85, 0x98, 0x76, 0xd0, 0x0e, 0xa4, 0xa5, 0xfc, 0xa6, 0x69, 0x3b, 0xdf, 0xa6, 0x3e, 0xac, 0xe0, - 0xe8, 0x36, 0x2c, 0x06, 0x65, 0x45, 0xc9, 0x79, 0xa2, 0xae, 0x20, 0x7f, 0x4d, 0x6d, 0xaa, 0x0a, - 0xcb, 0x2f, 0x1a, 0x64, 0x7c, 0x4f, 0xb7, 0x21, 0xe3, 0x5f, 0xd4, 0x2c, 0x72, 0xf3, 0xb1, 0xe8, - 0x13, 0xc8, 0xf5, 0x3d, 0xd1, 0x9a, 0xfb, 0x72, 0xff, 0xa7, 0x3c, 0x59, 0x89, 0x6e, 0x99, 0xb5, - 0x2f, 0x61, 0xe1, 0x8d, 0xd5, 0xa8, 0x75, 0xd6, 0x62, 0xad, 0xb3, 0x70, 0x9b, 0x2b, 0x53, 0x62, - 0xea, 0xfd, 0x13, 0x4e, 0x26, 0xdd, 0x0e, 0xd7, 0x36, 0x4f, 0x38, 0x91, 0x6e, 0x6f, 0x3a, 0x70, - 0xd5, 0xb2, 0xdf, 0x7d, 0xae, 0x4d, 0xf5, 0x5f, 0x41, 0x47, 0x4c, 0x76, 0xb4, 0x2f, 0x36, 0x87, - 0x16, 0x3f, 0xf4, 0xfa, 0xf5, 0x81, 0x3d, 0x5e, 0x53, 0xf6, 0xab, 0x16, 0x65, 0xdc, 0xf5, 0xc6, - 0x84, 0xaa, 0xf7, 0x76, 0x2d, 0xa2, 0x5a, 0x55, 0xbf, 0x33, 0x0c, 0x09, 0x5d, 0x1d, 0x46, 0x3f, - 0x37, 0xf4, 0x33, 0x72, 0xfa, 0xee, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x5e, 0xa6, 0x6a, - 0x92, 0x10, 0x00, 0x00, -} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go deleted file mode 100644 index 929e8995bb..0000000000 --- a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,406 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: opencensus/proto/trace/v1/trace_config.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Global configuration of the trace service. -type TraceConfig struct { - // The global default sampler used to make decisions on span sampling. - // - // Types that are valid to be assigned to Sampler: - // *TraceConfig_ProbabilitySampler - // *TraceConfig_ConstantSampler - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` - // The global default max number of message events per span. - MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` - // The global default max number of link entries per span. - MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TraceConfig) Reset() { *m = TraceConfig{} } -func (m *TraceConfig) String() string { return proto.CompactTextString(m) } -func (*TraceConfig) ProtoMessage() {} -func (*TraceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{0} -} - -func (m *TraceConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TraceConfig.Unmarshal(m, b) -} -func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) -} -func (m *TraceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceConfig.Merge(m, src) -} -func (m *TraceConfig) XXX_Size() int { - return xxx_messageInfo_TraceConfig.Size(m) -} -func (m *TraceConfig) XXX_DiscardUnknown() { - xxx_messageInfo_TraceConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceConfig proto.InternalMessageInfo - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ProbabilitySampler struct { - ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` -} - -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` -} - -func (*TraceConfig_ProbabilitySampler) 
isTraceConfig_Sampler() {} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { - if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { - return x.ProbabilitySampler - } - return nil -} - -func (m *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { - if m != nil { - return m.MaxNumberOfAttributes - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { - if m != nil { - return m.MaxNumberOfAnnotations - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { - if m != nil { - return m.MaxNumberOfMessageEvents - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfLinks() int64 { - if m != nil { - return m.MaxNumberOfLinks - } - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*TraceConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _TraceConfig_OneofMarshaler, _TraceConfig_OneofUnmarshaler, _TraceConfig_OneofSizer, []interface{}{ - (*TraceConfig_ProbabilitySampler)(nil), - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } -} - -func _TraceConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*TraceConfig) - // sampler - switch x := m.Sampler.(type) { - case *TraceConfig_ProbabilitySampler: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ProbabilitySampler); err != nil { - return err - } - case *TraceConfig_ConstantSampler: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ConstantSampler); err != nil { - return err - } - case *TraceConfig_RateLimitingSampler: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RateLimitingSampler); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("TraceConfig.Sampler has unexpected type %T", x) - } - return nil -} - -func _TraceConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*TraceConfig) - switch tag { - case 1: // sampler.probability_sampler - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ProbabilitySampler) - err := b.DecodeMessage(msg) - m.Sampler = &TraceConfig_ProbabilitySampler{msg} - return true, err - case 2: // sampler.constant_sampler - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ConstantSampler) - err := b.DecodeMessage(msg) - m.Sampler = &TraceConfig_ConstantSampler{msg} - return true, err - case 3: // sampler.rate_limiting_sampler - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RateLimitingSampler) - err := b.DecodeMessage(msg) - m.Sampler = &TraceConfig_RateLimitingSampler{msg} - return true, err - default: - return false, nil - } -} - -func 
_TraceConfig_OneofSizer(msg proto.Message) (n int) { - m := msg.(*TraceConfig) - // sampler - switch x := m.Sampler.(type) { - case *TraceConfig_ProbabilitySampler: - s := proto.Size(x.ProbabilitySampler) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *TraceConfig_ConstantSampler: - s := proto.Size(x.ConstantSampler) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *TraceConfig_RateLimitingSampler: - s := proto.Size(x.RateLimitingSampler) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Sampler that tries to uniformly sample traces with a given probability. -// The probability of sampling a trace is equal to that of the specified probability. -type ProbabilitySampler struct { - // The desired probability of sampling. Must be within [0.0, 1.0]. - SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } -func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } -func (*ProbabilitySampler) ProtoMessage() {} -func (*ProbabilitySampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{1} -} - -func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) -} -func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) -} -func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbabilitySampler.Merge(m, src) -} -func (m *ProbabilitySampler) XXX_Size() int { - return xxx_messageInfo_ProbabilitySampler.Size(m) -} -func (m *ProbabilitySampler) XXX_DiscardUnknown() { - xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo - -func (m *ProbabilitySampler) GetSamplingProbability() float64 { - if m != nil { - return m.SamplingProbability - } - return 0 -} - -// Sampler that makes a constant decision (either always "yes" or always "no") -// on span sampling. -type ConstantSampler struct { - // Whether spans should be always sampled, or never sampled. 
- Decision bool `protobuf:"varint,1,opt,name=decision,proto3" json:"decision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } -func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } -func (*ConstantSampler) ProtoMessage() {} -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{2} -} - -func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) -} -func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) -} -func (m *ConstantSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConstantSampler.Merge(m, src) -} -func (m *ConstantSampler) XXX_Size() int { - return xxx_messageInfo_ConstantSampler.Size(m) -} -func (m *ConstantSampler) XXX_DiscardUnknown() { - xxx_messageInfo_ConstantSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo - -func (m *ConstantSampler) GetDecision() bool { - if m != nil { - return m.Decision - } - return false -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - // Rate per second. - Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } -func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } -func (*RateLimitingSampler) ProtoMessage() {} -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{3} -} - -func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) -} -func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) -} -func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitingSampler.Merge(m, src) -} -func (m *RateLimitingSampler) XXX_Size() int { - return xxx_messageInfo_RateLimitingSampler.Size(m) -} -func (m *RateLimitingSampler) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo - -func (m *RateLimitingSampler) GetQps() int64 { - if m != nil { - return m.Qps - } - return 0 -} - -func init() { - proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") - proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") - proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") - proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") -} - -func init() { - proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) -} - -var fileDescriptor_5359209b41ff50c5 = []byte{ - // 431 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x41, 0x6f, 0xd4, 0x30, - 0x10, 0x85, 0x09, 0x5b, 0xda, 0x32, 0x3d, 0x74, 0xe5, 0xa8, 0x28, 0x45, 0x3d, 0x54, 0x7b, 0xa1, - 0x42, 0x24, 0xa1, 0x70, 0x40, 0x5c, 0x90, 0xd8, 
0x0a, 0xc4, 0xa1, 0xc0, 0x2a, 0x20, 0x21, 0x71, - 0x09, 0x4e, 0xea, 0x0d, 0x16, 0xf1, 0x38, 0xd8, 0x93, 0xa8, 0xfc, 0x17, 0x7e, 0x6c, 0x15, 0x67, - 0x95, 0x64, 0xbb, 0xed, 0xde, 0xec, 0xf7, 0xde, 0xf7, 0x2c, 0x79, 0x6c, 0x78, 0xa1, 0x2b, 0x81, - 0xb9, 0x40, 0x5b, 0xdb, 0xb8, 0x32, 0x9a, 0x74, 0x4c, 0x86, 0xe7, 0x22, 0x6e, 0xce, 0xbb, 0x45, - 0x9a, 0x6b, 0x5c, 0xca, 0x22, 0x72, 0x1e, 0x3b, 0x1e, 0xd2, 0x9d, 0x12, 0xb9, 0x50, 0xd4, 0x9c, - 0xcf, 0xfe, 0xef, 0xc0, 0xc1, 0xf7, 0x76, 0x73, 0xe1, 0x00, 0xf6, 0x0b, 0xfc, 0xca, 0xe8, 0x8c, - 0x67, 0xb2, 0x94, 0xf4, 0x2f, 0xb5, 0x5c, 0x55, 0xa5, 0x30, 0x81, 0x77, 0xea, 0x9d, 0x1d, 0xbc, - 0x0a, 0xa3, 0x7b, 0x8b, 0xa2, 0xc5, 0x40, 0x7d, 0xeb, 0xa0, 0x4f, 0x0f, 0x12, 0x56, 0x6d, 0xa8, - 0xec, 0x07, 0x4c, 0x73, 0x8d, 0x96, 0x38, 0x52, 0x5f, 0xff, 0xd0, 0xd5, 0x3f, 0xdf, 0x52, 0x7f, - 0xb1, 0x42, 0x86, 0xee, 0xc3, 0x7c, 0x5d, 0x62, 0x57, 0x70, 0x64, 0x38, 0x89, 0xb4, 0x94, 0x4a, - 0x92, 0xc4, 0xa2, 0x6f, 0x9f, 0xb8, 0xf6, 0x68, 0x4b, 0x7b, 0xc2, 0x49, 0x5c, 0xae, 0xb0, 0xe1, - 0x04, 0xdf, 0x6c, 0xca, 0xec, 0x0d, 0x04, 0x8a, 0x5f, 0xa7, 0x58, 0xab, 0x4c, 0x98, 0x54, 0x2f, - 0x53, 0x4e, 0x64, 0x64, 0x56, 0x93, 0xb0, 0xc1, 0xce, 0xa9, 0x77, 0x36, 0x49, 0x8e, 0x14, 0xbf, - 0xfe, 0xe2, 0xec, 0xaf, 0xcb, 0xf7, 0xbd, 0xc9, 0xde, 0xc2, 0xf1, 0x2d, 0x10, 0x51, 0x13, 0x27, - 0xa9, 0xd1, 0x06, 0x8f, 0x1c, 0xf9, 0x64, 0x4c, 0x0e, 0x2e, 0x7b, 0x07, 0x27, 0xeb, 0xa8, 0x12, - 0xd6, 0xf2, 0x42, 0xa4, 0xa2, 0x11, 0x48, 0x36, 0xd8, 0x75, 0x74, 0x30, 0xa2, 0x3f, 0x77, 0x81, - 0x0f, 0xce, 0x67, 0x21, 0xf8, 0xeb, 0x7c, 0x29, 0xf1, 0x8f, 0x0d, 0xf6, 0x1c, 0x36, 0x1d, 0x61, - 0x97, 0xad, 0x3e, 0x7f, 0x0c, 0x7b, 0xab, 0xab, 0x9b, 0x7d, 0x04, 0xb6, 0x39, 0x58, 0xf6, 0x12, - 0x7c, 0x17, 0x90, 0x58, 0x8c, 0x5c, 0xf7, 0x48, 0xbc, 0xe4, 0x2e, 0x6b, 0x16, 0xc2, 0xe1, 0xad, - 0x09, 0xb2, 0xa7, 0xb0, 0x7f, 0x25, 0x72, 0x69, 0xa5, 0x46, 0x47, 0xee, 0x27, 0xfd, 0x7e, 0xf6, - 0x0c, 0xfc, 0x3b, 0x46, 0xc2, 0xa6, 0x30, 0xf9, 0x5b, 0x59, 0x97, 0x9e, 0x24, 0xed, 0x72, 0xde, - 0xc0, 0x89, 0xd4, 0xf7, 0x0f, 0x76, 0x3e, 0x1d, 0xbd, 0xed, 0x45, 0x6b, 0x2d, 0xbc, 0x9f, 0xf3, - 0x42, 0xd2, 0xef, 0x3a, 0x8b, 0x72, 0xad, 0xe2, 0x8e, 0x0a, 0x25, 0x5a, 0x32, 0xb5, 0x12, 0xd8, - 0xdd, 0x78, 0x3c, 0x14, 0x86, 0xdd, 0xef, 0x2a, 0x04, 0x86, 0xc5, 0xf0, 0xc9, 0xb2, 0x5d, 0x27, - 0xbf, 0xbe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x98, 0xa1, 0x9e, 0x88, 0x03, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go index 110fc23e15..76cf4852c7 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -19,6 +19,7 @@ import ( "bytes" "errors" "fmt" + "regexp" "strconv" "strings" ) @@ -76,6 +77,14 @@ func (v *Version) Set(version string) error { return fmt.Errorf("%s is not in dotted-tri format", version) } + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + parsed := make([]int64, 3, 3) for i, v := range dotParts[:3] { @@ -224,6 +233,13 @@ func recursivePreReleaseCompare(versionA []string, versionB []string) int { bInt = true } + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + // Handle Integer Comparison if aInt && bInt { if aI > bI { @@ -266,3 +282,15 @@ func (v *Version) BumpPatch() { v.PreRelease = PreRelease("") v.Metadata = "" } + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-systemd/unit/deserialize.go b/vendor/github.com/coreos/go-systemd/unit/deserialize.go index bf97280521..c0c06bdfc2 100644 --- a/vendor/github.com/coreos/go-systemd/unit/deserialize.go +++ b/vendor/github.com/coreos/go-systemd/unit/deserialize.go @@ -88,7 +88,7 @@ func (l *lexer) lex() { l.errchan <- err return } - if bytes.IndexAny(line, SYSTEMD_NEWLINE) == -1 { + if !bytes.ContainsAny(line, SYSTEMD_NEWLINE) { l.errchan <- ErrLineTooLong return } diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed650ec..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index 1027f56cd9..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f793cb..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. 
This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. - -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. 
- -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. - -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index d358d881b8..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# jwt-go - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) -[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. - -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. 
Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. - -**BREAKING CHANGES:*** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. 
- -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### Signing Methods and Key Types - -Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: - -* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). - -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. 
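For readers following the key-type guidance in the removed jwt-go README above, here is a minimal sketch of the HS256 sign-and-parse flow it describes, assuming the jwt-go v3 API that this patch drops from vendor/; the secret value and claim names are hypothetical:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// HMAC methods (HS256/HS384/HS512) expect a []byte key for both signing and validation.
	secret := []byte("example-secret") // hypothetical secret

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "alice",                         // hypothetical claim
		"exp":  time.Now().Add(time.Hour).Unix(), // standard expiry claim
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// The Keyfunc checks the signing-method family before returning the key,
	// the alg-validation step the README's security notice calls for.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		panic(err)
	}

	claims := parsed.Claims.(jwt.MapClaims)
	fmt.Println(claims["user"], parsed.Valid)
}
```

The type assertion on `t.Method` matters more than the key lookup itself: it is what prevents a token signed with `none` or an RSA public key from being accepted by an HMAC verifier.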
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index 6370298313..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,118 +0,0 @@ -## `jwt-go` Version History - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. 
Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. 
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go deleted file mode 100644 index f0228f02e0..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/claims.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. - if c.VerifyExpiresAt(now, false) == false { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if c.VerifyIssuedAt(now, false) == false { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if c.VerifyNotBefore(now, false) == false { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud string, cmp string, required bool) bool { - if aud == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a3b3..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. -package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go deleted file mode 100644 index f977381240..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: - ecdsaKey = k - default: - return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { 
- return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go deleted file mode 100644 index d19624b726..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go deleted file mode 100644 index 1c93024aad..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // bitfield. see ValidationError... 
constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index addbe5d401..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,95 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -// Expects key type of []byte for both signing and validation -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. 
-// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKeyType -} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go deleted file mode 100644 index 291213c460..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/map_claims.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - aud, _ := m["aud"].(string) - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { - case float64: - return verifyExp(int64(exp), cmp, req) - case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) - } - return req == false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { - case float64: - return verifyIat(int64(iat), cmp, req) - case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) - } - return req == false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { - case float64: - return verifyNbf(int64(nbf), cmp, req) - case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) - } - return req == false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. 
-func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if m.VerifyExpiresAt(now, false) == false { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if m.VerifyIssuedAt(now, false) == false { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if m.VerifyNotBefore(now, false) == false { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go deleted file mode 100644 index f04d189d06..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. - return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go deleted file mode 100644 index d6901d9adb..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { - return token, err - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - if ve, ok := err.(*ValidationError); ok { - return token, ve - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} - -// WARNING: Don't use this method unless you know what you're doing -// -// This method parses the token but doesn't validate the signature. It's only -// ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from -// it. 
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - token = &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - return token, parts, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index e4caf1ca4a..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be an *rsa.PublicKey structure. 
-func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be an *rsa.PrivateKey structure. -func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go deleted file mode 100644 index 10ee9db8a4..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - 
hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index a5ababf956..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 private key protected with password -func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - - var blockDecrypted []byte - if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { - return nil, err - } - - if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, 
ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index ed1f212b21..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,35 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go deleted file mode 100644 index d637e0867c..0000000000 --- a/vendor/github.com/dgrijalva/jwt-go/token.go +++ /dev/null @@ -1,108 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. 
Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 0000000000..2092c72c46 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.8 + - 1.7 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - go get github.com/jessevdk/go-flags + +script: + - go get + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 0000000000..0eb9b72d84 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 0000000000..9c7f87f7ce --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,297 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionallity for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch +``` + +**Stable Versions**: +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... 
+ original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now lets apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated tina doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "similar"`) + } +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "similar" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both set of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar as merging each merge patch to the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... 
+ combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individual against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the commandline program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 0000000000..75304b4437 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. 
+type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 0000000000..6806c4c200 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,383 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 0000000000..1b5f95e611 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,776 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. 
+type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
+func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + 
+ if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. 
+func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml new file mode 100644 index 0000000000..95f8a1ff5c --- /dev/null +++ b/vendor/github.com/fatih/color/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.8.x + - tip + diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock new file mode 100644 index 0000000000..7d879e9caf --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml new file mode 100644 index 0000000000..ff1617f71d --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/mattn/go-colorable" + version = "0.0.9" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 0000000000..25fdaf639d --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 0000000000..3fc9544602 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,179 @@ +# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) + + + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + + +![Color](https://i.imgur.com/c1JI0lA.png) + + +## Install + +```bash +go get github.com/fatih/color +``` + +Note that the `vendor` folder is here for stability. Remove the folder if you +already have the dependencies in your GOPATH. + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! 
+red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 0000000000..91c8e9f062 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. 
+func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) 
+} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. 
+func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user setted action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. +func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) 
} + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) 
} + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) +} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 0000000000..cf1e96500f --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,133 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. 
+ + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! + red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However only for color.SprintXXX functions, user should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of an existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +It also has support for single color definitions (local). 
You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31e00..0000000000 --- a/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6edc01..0000000000 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de7..0000000000 --- a/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75b4d..0000000000 --- a/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. 
- var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 5860074026..0000000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. 
-type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. 
- // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. 
-// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054a8b..0000000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. 
- return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. 
- jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/vendor/github.com/gobuffalo/envy/.env b/vendor/github.com/gobuffalo/envy/.env deleted file mode 100644 index 33eeb3b13b..0000000000 --- a/vendor/github.com/gobuffalo/envy/.env +++ /dev/null @@ -1,5 +0,0 @@ -# This is a comment -# We can use equal or colon notation -DIR: root -FLAVOUR: none -INSIDE_FOLDER=false \ No newline at end of file diff --git a/vendor/github.com/gobuffalo/envy/.gitignore b/vendor/github.com/gobuffalo/envy/.gitignore deleted file mode 100644 index 3689718594..0000000000 --- a/vendor/github.com/gobuffalo/envy/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -*.log -.DS_Store -doc -tmp -pkg -*.gem -*.pid -coverage -coverage.data -build/* -*.pbxuser -*.mode1v3 -.svn -profile -.console_history -.sass-cache/* -.rake_tasks~ -*.log.lck -solr/ -.jhw-cache/ -jhw.* -*.sublime* -node_modules/ -dist/ -generated/ -.vendor/ -bin/* -gin-bin -.idea/ diff --git a/vendor/github.com/gobuffalo/envy/.travis.yml b/vendor/github.com/gobuffalo/envy/.travis.yml deleted file mode 100644 index 1fb041a259..0000000000 --- a/vendor/github.com/gobuffalo/envy/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go - -sudo: false - -matrix: - include: - - os: linux - go: "1.9.x" - - os: windows - go: "1.9.x" - - os: linux - go: "1.10.x" - - os: windows - go: "1.10.x" - - os: linux - go: "1.11.x" - env: - - GO111MODULE=off - - os: windows - go: "1.11.x" - env: - - GO111MODULE=off - - os: linux - go: "1.11.x" - env: - - GO111MODULE=on - - os: windows - go: "1.11.x" - env: - - GO111MODULE=on - -install: false - -script: - - go get -v -t ./... 
- - go test -v -timeout=5s -race ./... diff --git a/vendor/github.com/gobuffalo/envy/Makefile b/vendor/github.com/gobuffalo/envy/Makefile deleted file mode 100644 index b0db1c4f88..0000000000 --- a/vendor/github.com/gobuffalo/envy/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -TAGS ?= "sqlite" -GO_BIN ?= go - -install: - packr - $(GO_BIN) install -v . - -deps: - $(GO_BIN) get github.com/gobuffalo/release - $(GO_BIN) get github.com/gobuffalo/packr/packr - $(GO_BIN) get -tags ${TAGS} -t ./... -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif - -build: - packr - $(GO_BIN) build -v . - -test: - packr - $(GO_BIN) test -tags ${TAGS} ./... - -ci-test: - $(GO_BIN) test -tags ${TAGS} -race ./... - -lint: - gometalinter --vendor ./... --deadline=1m --skip=internal - -update: - $(GO_BIN) get -u -tags ${TAGS} -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif - packr - make test - make install -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif - -release-test: - $(GO_BIN) test -tags ${TAGS} -race ./... - -release: - release -y -f version.go diff --git a/vendor/github.com/gobuffalo/envy/README.md b/vendor/github.com/gobuffalo/envy/README.md deleted file mode 100644 index f54462a773..0000000000 --- a/vendor/github.com/gobuffalo/envy/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# envy -[![Build Status](https://travis-ci.org/gobuffalo/envy.svg?branch=master)](https://travis-ci.org/gobuffalo/envy) - -Envy makes working with ENV variables in Go trivial. - -* Get ENV variables with default values. -* Set ENV variables safely without affecting the underlying system. -* Temporarily change ENV vars; useful for testing. -* Map all of the key/values in the ENV. -* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) -* More! - -## Installation - -```text -$ go get -u github.com/gobuffalo/envy -``` - -## Usage - -```go -func Test_Get(t *testing.T) { - r := require.New(t) - r.NotZero(os.Getenv("GOPATH")) - r.Equal(os.Getenv("GOPATH"), envy.Get("GOPATH", "foo")) - r.Equal("bar", envy.Get("IDONTEXIST", "bar")) -} - -func Test_MustGet(t *testing.T) { - r := require.New(t) - r.NotZero(os.Getenv("GOPATH")) - v, err := envy.MustGet("GOPATH") - r.NoError(err) - r.Equal(os.Getenv("GOPATH"), v) - - _, err = envy.MustGet("IDONTEXIST") - r.Error(err) -} - -func Test_Set(t *testing.T) { - r := require.New(t) - _, err := envy.MustGet("FOO") - r.Error(err) - - envy.Set("FOO", "foo") - r.Equal("foo", envy.Get("FOO", "bar")) -} - -func Test_Temp(t *testing.T) { - r := require.New(t) - - _, err := envy.MustGet("BAR") - r.Error(err) - - envy.Temp(func() { - envy.Set("BAR", "foo") - r.Equal("foo", envy.Get("BAR", "bar")) - _, err = envy.MustGet("BAR") - r.NoError(err) - }) - - _, err = envy.MustGet("BAR") - r.Error(err) -} -``` -## .env files support - -Envy now supports loading `.env` files by using the [godotenv library](https://github.com/joho/godotenv/). -That means one can use and define multiple `.env` files which will be loaded on-demand. By default, no env files will be loaded. To load one or more, you need to call the `envy.Load` function in one of the following ways: - -```go -envy.Load() // 1 - -envy.Load("MY_ENV_FILE") // 2 - -envy.Load(".env", ".env.prod") // 3 - -envy.Load(".env", "NON_EXISTING_FILE") // 4 - -// 5 -envy.Load(".env") -envy.Load("NON_EXISTING_FILE") - -// 6 -envy.Load(".env", "NON_EXISTING_FILE", ".env.prod") -``` - -1. Will load the default `.env` file -2. Will load the file `MY_ENV_FILE`, **but not** `.env` -3. 
Will load the file `.env`, and after that will load the `.env.prod` file. If any variable is redefined in `. env.prod` it will be overwritten (will contain the `env.prod` value) -4. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available. -5. Same as 4 -6. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available, **but the ones in** `.env.prod` **won't**. diff --git a/vendor/github.com/gobuffalo/envy/envy.go b/vendor/github.com/gobuffalo/envy/envy.go deleted file mode 100644 index a5a7a3c6fc..0000000000 --- a/vendor/github.com/gobuffalo/envy/envy.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -package envy makes working with ENV variables in Go trivial. - -* Get ENV variables with default values. -* Set ENV variables safely without affecting the underlying system. -* Temporarily change ENV vars; useful for testing. -* Map all of the key/values in the ENV. -* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) -* More! -*/ -package envy - -import ( - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "sync" - - "github.com/joho/godotenv" - "github.com/rogpeppe/go-internal/modfile" -) - -var gil = &sync.RWMutex{} -var env = map[string]string{} - -// GO111MODULE is ENV for turning mods on/off -const GO111MODULE = "GO111MODULE" - -func init() { - Load() - loadEnv() -} - -// Load the ENV variables to the env map -func loadEnv() { - gil.Lock() - defer gil.Unlock() - - if os.Getenv("GO_ENV") == "" { - // if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care - // about v.Value (verbose test mode); we just want to know if the test environment has defined - // it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false), - // so we could not depend on v.Value anyway. - // - if v := flag.Lookup("test.v"); v != nil { - env["GO_ENV"] = "test" - } - } - - // set the GOPATH if using >= 1.8 and the GOPATH isn't set - if os.Getenv("GOPATH") == "" { - out, err := exec.Command("go", "env", "GOPATH").Output() - if err == nil { - gp := strings.TrimSpace(string(out)) - os.Setenv("GOPATH", gp) - } - } - - for _, e := range os.Environ() { - pair := strings.Split(e, "=") - env[pair[0]] = os.Getenv(pair[0]) - } -} - -func Mods() bool { - return Get(GO111MODULE, "off") == "on" -} - -// Reload the ENV variables. Useful if -// an external ENV manager has been used -func Reload() { - env = map[string]string{} - loadEnv() -} - -// Load .env files. Files will be loaded in the same order that are received. -// Redefined vars will override previously existing values. -// IE: envy.Load(".env", "test_env/.env") will result in DIR=test_env -// If no arg passed, it will try to load a .env file. -func Load(files ...string) error { - - // If no files received, load the default one - if len(files) == 0 { - err := godotenv.Overload() - if err == nil { - Reload() - } - return err - } - - // We received a list of files - for _, file := range files { - - // Check if it exists or we can access - if _, err := os.Stat(file); err != nil { - // It does not exist or we can not access. - // Return and stop loading - return err - } - - // It exists and we have permission. Load it - if err := godotenv.Overload(file); err != nil { - return err - } - - // Reload the env so all new changes are noticed - Reload() - - } - return nil -} - -// Get a value from the ENV. 
If it doesn't exist the -// default value will be returned. -func Get(key string, value string) string { - gil.RLock() - defer gil.RUnlock() - if v, ok := env[key]; ok { - return v - } - return value -} - -// Get a value from the ENV. If it doesn't exist -// an error will be returned -func MustGet(key string) (string, error) { - gil.RLock() - defer gil.RUnlock() - if v, ok := env[key]; ok { - return v, nil - } - return "", fmt.Errorf("could not find ENV var with %s", key) -} - -// Set a value into the ENV. This is NOT permanent. It will -// only affect values accessed through envy. -func Set(key string, value string) { - gil.Lock() - defer gil.Unlock() - env[key] = value -} - -// MustSet the value into the underlying ENV, as well as envy. -// This may return an error if there is a problem setting the -// underlying ENV value. -func MustSet(key string, value string) error { - gil.Lock() - defer gil.Unlock() - err := os.Setenv(key, value) - if err != nil { - return err - } - env[key] = value - return nil -} - -// Map all of the keys/values set in envy. -func Map() map[string]string { - gil.RLock() - defer gil.RUnlock() - cp := map[string]string{} - for k, v := range env { - cp[k] = v - } - return env -} - -// Temp makes a copy of the values and allows operation on -// those values temporarily during the run of the function. -// At the end of the function run the copy is discarded and -// the original values are replaced. This is useful for testing. -// Warning: This function is NOT safe to use from a goroutine or -// from code which may access any Get or Set function from a goroutine -func Temp(f func()) { - oenv := env - env = map[string]string{} - for k, v := range oenv { - env[k] = v - } - defer func() { env = oenv }() - f() -} - -func GoPath() string { - return Get("GOPATH", "") -} - -func GoBin() string { - return Get("GO_BIN", "go") -} - -// GoPaths returns all possible GOPATHS that are set. -func GoPaths() []string { - gp := Get("GOPATH", "") - if runtime.GOOS == "windows" { - return strings.Split(gp, ";") // Windows uses a different separator - } - return strings.Split(gp, ":") -} - -func importPath(path string) string { - path = strings.TrimPrefix(path, "/private") - for _, gopath := range GoPaths() { - srcpath := filepath.Join(gopath, "src") - rel, err := filepath.Rel(srcpath, path) - if err == nil { - return filepath.ToSlash(rel) - } - } - - // fallback to trim - rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src")) - rel = strings.TrimPrefix(rel, string(filepath.Separator)) - return filepath.ToSlash(rel) -} - -// CurrentModule will attempt to return the module name from `go.mod` if -// modules are enabled. -// If modules are not enabled it will fallback to using CurrentPackage instead. -func CurrentModule() (string, error) { - if !Mods() { - return CurrentPackage(), nil - } - moddata, err := ioutil.ReadFile("go.mod") - if err != nil { - return "", errors.New("go.mod cannot be read or does not exist while go module is enabled") - } - packagePath := modfile.ModulePath(moddata) - if packagePath == "" { - return "", errors.New("go.mod is malformed") - } - return packagePath, nil -} - -// CurrentPackage attempts to figure out the current package name from the PWD -// Use CurrentModule for a more accurate package name. 
-func CurrentPackage() string { - if Mods() { - } - pwd, _ := os.Getwd() - return importPath(pwd) -} - -func Environ() []string { - gil.RLock() - defer gil.RUnlock() - var e []string - for k, v := range env { - e = append(e, fmt.Sprintf("%s=%s", k, v)) - } - return e -} diff --git a/vendor/github.com/gobuffalo/envy/go.mod b/vendor/github.com/gobuffalo/envy/go.mod deleted file mode 100644 index f10a493794..0000000000 --- a/vendor/github.com/gobuffalo/envy/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/gobuffalo/envy - -require ( - github.com/gobuffalo/packr/v2 v2.0.0-rc.14 - github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/joho/godotenv v1.3.0 - github.com/rogpeppe/go-internal v1.1.0 - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/gobuffalo/envy/go.sum b/vendor/github.com/gobuffalo/envy/go.sum deleted file mode 100644 index 172ff9ae8c..0000000000 --- a/vendor/github.com/gobuffalo/envy/go.sum +++ /dev/null @@ -1,383 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= -github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= -github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= -github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= -github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= -github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod 
h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= -github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= -github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= -github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= -github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= -github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= -github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= -github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= -github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= -github.com/gobuffalo/buffalo-plugins v1.10.0 h1:oF+BJ20zenp31xJnjkFcXAMUqlrMMTicsD/UwQWCW/s= -github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= -github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= -github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= -github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= -github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= -github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= -github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= -github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= -github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= -github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= -github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= -github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= -github.com/gobuffalo/events v1.1.9 h1:ukq5ys/h0TuiX7eLJyZBD1dJOy0r19JTEYmgXKG9j+Y= -github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= -github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= -github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= 
-github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= -github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= -github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2 h1:51NF9n6h4nGMooU5ALB+uJCM0UOmcWjAegIZb/ePtoE= -github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= -github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= -github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= -github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= -github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= -github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= -github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= -github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= -github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= -github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= -github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= -github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= -github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= -github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= -github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= -github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5 h1:boQS3dA9PxhyufJEWIILrG6pJQbDnpwP2rFyvWacdoY= -github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= -github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= 
-github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= -github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= -github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= -github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= -github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= -github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= -github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= -github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= -github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= -github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= -github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= -github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= -github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= -github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= -github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= -github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= -github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= -github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= -github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= -github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= -github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= -github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= -github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= -github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= -github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= -github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= -github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod 
h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= -github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= -github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687 h1:uZ+G4JprR0UEq0aHZs+6eP7TEZuFfrIkmQWejIBV/QQ= -github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= -github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= -github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= -github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= -github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= -github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= -github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= -github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= -github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= -github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= -github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= -github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= -github.com/gobuffalo/packr/v2 v2.0.0-rc.14 h1:K41dNilNHDbDgCL3UE6K02JGuN89pjvD9oG99X7Om2s= -github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= -github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plushgen 
v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= -github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= -github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= -github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= -github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= -github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= -github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= -github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= -github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= -github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= -github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= -github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= -github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= -github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= -github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= -github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/pat 
v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= -github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.8 h1:VfG72pyIxgtC7+3X9CMHI0AOl4LwyRAg98WAgsvffi8= -github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= -github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= -github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= -github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= -github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= -github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod 
h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= -github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= -github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= -github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go 
v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= -github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598 h1:S8GOgffXV1X3fpVG442QRfWOt0iFl79eHJ7OPt725bo= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b h1:Z5QW7z0ycYrOVRYv3z4FeSZbRNvVwUfXHKQSZKb5A6w= -golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= -gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gobuffalo/envy/shoulders.md b/vendor/github.com/gobuffalo/envy/shoulders.md deleted file mode 100644 index 120330be34..0000000000 --- a/vendor/github.com/gobuffalo/envy/shoulders.md +++ /dev/null @@ -1,16 +0,0 @@ -# github.com/gobuffalo/envy Stands on the Shoulders of Giants - -github.com/gobuffalo/envy does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work. 
- -Thank you to the following **GIANTS**: - - -* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy) - -* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv) - -* [github.com/rogpeppe/go-internal/modfile](https://godoc.org/github.com/rogpeppe/go-internal/modfile) - -* [github.com/rogpeppe/go-internal/module](https://godoc.org/github.com/rogpeppe/go-internal/module) - -* [github.com/rogpeppe/go-internal/semver](https://godoc.org/github.com/rogpeppe/go-internal/semver) diff --git a/vendor/github.com/gobuffalo/envy/version.go b/vendor/github.com/gobuffalo/envy/version.go deleted file mode 100644 index f0de312463..0000000000 --- a/vendor/github.com/gobuffalo/envy/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package envy - -const Version = "v1.6.12" diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/gobuffalo/flect/LICENSE similarity index 87% rename from vendor/github.com/peterbourgon/diskv/LICENSE rename to vendor/github.com/gobuffalo/flect/LICENSE index 41ce7f16e1..649efd4372 100644 --- a/vendor/github.com/peterbourgon/diskv/LICENSE +++ b/vendor/github.com/gobuffalo/flect/LICENSE @@ -1,4 +1,6 @@ -Copyright (c) 2011-2012 Peter Bourgon +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gobuffalo/flect/Makefile b/vendor/github.com/gobuffalo/flect/Makefile index b0db1c4f88..0ac539f1c2 100644 --- a/vendor/github.com/gobuffalo/flect/Makefile +++ b/vendor/github.com/gobuffalo/flect/Makefile @@ -1,46 +1,61 @@ -TAGS ?= "sqlite" -GO_BIN ?= go +TAGS ?= "" +GO_BIN ?= "go" -install: - packr - $(GO_BIN) install -v . +install: + $(GO_BIN) install -tags ${TAGS} -v . + make tidy -deps: - $(GO_BIN) get github.com/gobuffalo/release - $(GO_BIN) get github.com/gobuffalo/packr/packr - $(GO_BIN) get -tags ${TAGS} -t ./... +tidy: ifeq ($(GO111MODULE),on) $(GO_BIN) mod tidy +else + echo skipping go mod tidy endif -build: - packr +deps: + $(GO_BIN) get -tags ${TAGS} -t ./... + make tidy + +build: $(GO_BIN) build -v . + make tidy + +test: + $(GO_BIN) test -cover -tags ${TAGS} ./... + make tidy -test: - packr - $(GO_BIN) test -tags ${TAGS} ./... +ci-deps: + $(GO_BIN) get -tags ${TAGS} -t ./... -ci-test: +ci-test: $(GO_BIN) test -tags ${TAGS} -race ./... lint: - gometalinter --vendor ./... 
--deadline=1m --skip=internal + go get github.com/golangci/golangci-lint/cmd/golangci-lint + golangci-lint run --enable-all + make tidy update: - $(GO_BIN) get -u -tags ${TAGS} ifeq ($(GO111MODULE),on) + rm go.* + $(GO_BIN) mod init $(GO_BIN) mod tidy +else + $(GO_BIN) get -u -tags ${TAGS} endif - packr make test make install -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif + make tidy -release-test: +release-test: $(GO_BIN) test -tags ${TAGS} -race ./... + make tidy release: - release -y -f version.go + $(GO_BIN) get github.com/gobuffalo/release + make tidy + release -y -f version.go --skip-packr + make tidy + + + diff --git a/vendor/github.com/gobuffalo/flect/README.md b/vendor/github.com/gobuffalo/flect/README.md index af0afbda98..2d9a1bd3b6 100644 --- a/vendor/github.com/gobuffalo/flect/README.md +++ b/vendor/github.com/gobuffalo/flect/README.md @@ -2,7 +2,7 @@

GoDoc -Build Status +CI Go Report Card

diff --git a/vendor/github.com/gobuffalo/flect/SHOULDERS.md b/vendor/github.com/gobuffalo/flect/SHOULDERS.md new file mode 100644 index 0000000000..8c359f157e --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/SHOULDERS.md @@ -0,0 +1,10 @@ +# github.com/gobuffalo/flect Stands on the Shoulders of Giants + +github.com/gobuffalo/flect does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work. + +Thank you to the following **GIANTS**: + + +* [github.com/davecgh/go-spew](https://godoc.org/github.com/davecgh/go-spew) + +* [github.com/stretchr/testify](https://godoc.org/github.com/stretchr/testify) diff --git a/vendor/github.com/gobuffalo/flect/azure-pipelines.yml b/vendor/github.com/gobuffalo/flect/azure-pipelines.yml new file mode 100644 index 0000000000..417e2c5792 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/azure-pipelines.yml @@ -0,0 +1,71 @@ +variables: + GOBIN: "$(GOPATH)/bin" # Go binaries path + GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path + modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module"s code + +jobs: +- job: Windows + pool: + vmImage: "vs2017-win2016" + strategy: + matrix: + go 1.10: + go_version: "1.10" + go 1.11 (on): + go_version: "1.11.5" + GO111MODULE: "on" + go 1.11 (off): + go_version: "1.11.5" + GO111MODULE: "off" + go 1.12 (on): + go_version: "1.12" + GO111MODULE: "on" + go 1.12 (off): + go_version: "1.12" + GO111MODULE: "off" + steps: + - template: azure-tests.yml + +- job: macOS + pool: + vmImage: "macOS-10.13" + strategy: + matrix: + go 1.10: + go_version: "1.10" + go 1.11 (on): + go_version: "1.11.5" + GO111MODULE: "on" + go 1.11 (off): + go_version: "1.11.5" + GO111MODULE: "off" + go 1.12 (on): + go_version: "1.12" + GO111MODULE: "on" + go 1.12 (off): + go_version: "1.12" + GO111MODULE: "off" + steps: + - template: azure-tests.yml + +- job: Linux + pool: + vmImage: "ubuntu-16.04" + strategy: + matrix: + go 1.10: + go_version: "1.10" + go 1.11 (on): + go_version: "1.11.5" + GO111MODULE: "on" + go 1.11 (off): + go_version: "1.11.5" + GO111MODULE: "off" + go 1.12 (on): + go_version: "1.12" + GO111MODULE: "on" + go 1.12 (off): + go_version: "1.12" + GO111MODULE: "off" + steps: + - template: azure-tests.yml diff --git a/vendor/github.com/gobuffalo/flect/azure-tests.yml b/vendor/github.com/gobuffalo/flect/azure-tests.yml new file mode 100644 index 0000000000..eea5822fad --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/azure-tests.yml @@ -0,0 +1,19 @@ +steps: + - task: GoTool@0 + inputs: + version: $(go_version) + - task: Bash@3 + inputs: + targetType: inline + script: | + mkdir -p "$(GOBIN)" + mkdir -p "$(GOPATH)/pkg" + mkdir -p "$(modulePath)" + shopt -s extglob + mv !(gopath) "$(modulePath)" + displayName: "Setup Go Workspace" + - script: | + go get -t -v ./... + go test -race ./... 
+ workingDirectory: "$(modulePath)" + displayName: "Tests" diff --git a/vendor/github.com/gobuffalo/flect/custom_data.go b/vendor/github.com/gobuffalo/flect/custom_data.go index 12d3f9b470..9a2dfc74ad 100644 --- a/vendor/github.com/gobuffalo/flect/custom_data.go +++ b/vendor/github.com/gobuffalo/flect/custom_data.go @@ -8,8 +8,6 @@ import ( "io/ioutil" "os" "path/filepath" - - "github.com/gobuffalo/envy" ) func init() { @@ -23,7 +21,10 @@ type CustomDataParser func(io.Reader) error func loadCustomData(defaultFile, env, readErrorMessage string, parser CustomDataParser) { pwd, _ := os.Getwd() - path := envy.Get(env, filepath.Join(pwd, defaultFile)) + path, found := os.LookupEnv(env) + if !found { + path = filepath.Join(pwd, defaultFile) + } if _, err := os.Stat(path); err != nil { return diff --git a/vendor/github.com/gobuffalo/flect/go.mod b/vendor/github.com/gobuffalo/flect/go.mod index 8501063a0e..cd02d074b9 100644 --- a/vendor/github.com/gobuffalo/flect/go.mod +++ b/vendor/github.com/gobuffalo/flect/go.mod @@ -1,6 +1,8 @@ module github.com/gobuffalo/flect +go 1.12 + require ( - github.com/gobuffalo/envy v1.6.11 - github.com/stretchr/testify v1.2.2 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/github.com/gobuffalo/flect/go.sum b/vendor/github.com/gobuffalo/flect/go.sum index dec25e8087..4f76e62c1f 100644 --- a/vendor/github.com/gobuffalo/flect/go.sum +++ b/vendor/github.com/gobuffalo/flect/go.sum @@ -1,321 +1,9 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= -github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= -github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= 
-github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= -github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= -github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= -github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= -github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= -github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= -github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= -github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= -github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= -github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= -github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= -github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= -github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= -github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= -github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= -github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= -github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= -github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= -github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= -github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= -github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= -github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= -github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod 
h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= -github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= -github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= -github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= -github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= -github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= -github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= -github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= -github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= -github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= -github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= -github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= -github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= -github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= -github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= -github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= -github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= -github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= -github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= -github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= -github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= -github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= -github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= 
-github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= -github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= -github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= -github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= -github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= -github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= -github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= -github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= -github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= -github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= -github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= -github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= -github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= -github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= -github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= -github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= -github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= -github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= -github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= 
-github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= -github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= -github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= -github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= -github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= -github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= -github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= -github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= -github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= -github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= -github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= -github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= -github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= -github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= -github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= -github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= -github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid 
v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= -github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= -github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= -github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= -github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= -github.com/markbates/grift v1.0.4/go.mod 
h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= -github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= -github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= -github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= -github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= -github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/sigtx v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= -github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= -github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.0.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod 
h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto 
v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= -gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git 
a/vendor/github.com/gobuffalo/flect/humanize.go b/vendor/github.com/gobuffalo/flect/humanize.go new file mode 100644 index 0000000000..ea70fe0b65 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/humanize.go @@ -0,0 +1,31 @@ +package flect + +import ( + "strings" +) + +// Humanize returns first letter of sentence capitalized +// employee_salary = Employee salary +// employee_id = employee ID +// employee_mobile_number = Employee mobile number +func Humanize(s string) string { + return New(s).Humanize().String() +} + +// Humanize First letter of sentence capitalized +func (i Ident) Humanize() Ident { + if len(i.Original) == 0 { + return New("") + } + + var parts []string + for index, part := range i.Parts { + if index == 0 { + part = strings.Title(i.Parts[0]) + } + + parts = xappend(parts, part) + } + + return New(strings.Join(parts, " ")) +} diff --git a/vendor/github.com/gobuffalo/flect/ident.go b/vendor/github.com/gobuffalo/flect/ident.go index 8c58455739..78b51d4576 100644 --- a/vendor/github.com/gobuffalo/flect/ident.go +++ b/vendor/github.com/gobuffalo/flect/ident.go @@ -2,7 +2,6 @@ package flect import ( "encoding" - "regexp" "strings" "unicode" "unicode/utf8" @@ -29,8 +28,6 @@ func New(s string) Ident { return i } -var splitRx = regexp.MustCompile("[^\\p{L}]") - func toParts(s string) []string { parts := []string{} s = strings.TrimSpace(s) diff --git a/vendor/github.com/gobuffalo/flect/titleize.go b/vendor/github.com/gobuffalo/flect/titleize.go index e7a20d37f2..cbbf08a5aa 100644 --- a/vendor/github.com/gobuffalo/flect/titleize.go +++ b/vendor/github.com/gobuffalo/flect/titleize.go @@ -20,8 +20,7 @@ func Titleize(s string) string { func (i Ident) Titleize() Ident { var parts []string for _, part := range i.Parts { - var x string - x = string(unicode.ToTitle(rune(part[0]))) + x := string(unicode.ToTitle(rune(part[0]))) if len(part) > 1 { x += part[1:] } diff --git a/vendor/github.com/gobuffalo/flect/version.go b/vendor/github.com/gobuffalo/flect/version.go index b5f62170dc..e06098370d 100644 --- a/vendor/github.com/gobuffalo/flect/version.go +++ b/vendor/github.com/gobuffalo/flect/version.go @@ -1,4 +1,4 @@ package flect //Version holds Flect version number -const Version = "v0.0.1" +const Version = "v0.1.5" diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index d9aa3c42d6..63b0f08bef 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 0000000000..35b882c09a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 44ebd457cf..686bd2a09d 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -544,7 +544,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index b2271d0b7b..d17f802092 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -341,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. 
-type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go index 3b6ca41d5e..f48a756761 100644 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -36,13 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 04dcb8d9ef..c9e5fa0207 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -391,9 +391,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -406,14 +403,8 @@ func GetProperties(t reflect.Type) *StructProperties { // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. 
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go index ba58c49a43..9b1538d055 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -491,7 +491,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go index e6b15c76ca..bb2622f28c 100644 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -138,7 +138,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -2142,7 +2142,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index d9aa3c42d6..63b0f08bef 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 0000000000..35b882c09a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index d4db5a1c14..f9b6e41b3c 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1, m2 := e1.value, e2.value + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 816a3b9d6c..fa88add30a 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,9 +185,25 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. value interface{} - enc []byte + + // enc is the raw bytes for the extension field. + enc []byte } // SetRawExtension is for testing only. 
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return e.value, nil + return extensionAsLegacyType(e.value), nil } if extension.ExtensionType == nil { @@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = v + e.value = extensionAsStorageType(v) e.desc = extension e.enc = nil emap[extension.Field] = e - return e.value, nil + return extensionAsLegacyType(e.value), nil } // defaultExtensionValue returns the default value for extension. @@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } @@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. 
+ if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 75565cc6dc..fdd328bb7f 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -341,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) // InternalMessageInfo is a type used internally by generated .pb.go files. // This type is not intended to be used by non-generated code. diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index 3b6ca41d5e..f48a756761 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -36,13 +36,7 @@ package proto */ import ( - "bytes" - "encoding/json" "errors" - "fmt" - "reflect" - "sort" - "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte { return buf[i+1:] } -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. 
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { +func unmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: @@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error { } return nil } - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. 
- -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index b6cad90834..94fa9194a8 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -79,10 +79,13 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) + if deref { + u = u.Elem() + } return pointer{v: u} } diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index d55a335d94..dbfffe071b 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -85,16 +85,21 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p } // valToPointer converts v to a pointer. v must be of pointer type. diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 50b99b83a8..79668ff5c5 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties { sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { - if collectStats { - stats.Chit++ - } return sprop } @@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties { return sprop } +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + // getPropertiesLocked requires that propertiesMu is held. 
func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } return prop } - if collectStats { - stats.Cmiss++ - } prop := new(StructProperties) // in case of recursive protos, fill this in now. @@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { // Re-order prop.order. sort.Sort(prop) - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - + if len(oots) > 0 { // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go index b16794496f..5cb11fa955 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -87,6 +87,7 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr } var ( @@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() { // get oneof implementers var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() } n := t.NumField() @@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), sizer: sizer, marshaler: marshaler, isptr: t.Kind() == reflect.Ptr, + deref: deref, } // update cache @@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. 
fi.isPointer = true fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) @@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI } } -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - // wiretype returns the wire encoding of the type. func wiretype(encoding string) uint64 { switch encoding { @@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. 
ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) n += ei.sizer(p, ei.tagsize) } return n @@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr) + p := toAddrPointer(&v, ei.isptr, ei.deref) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go index ebf1caa56a..acee2fc529 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { u.computeUnmarshalInfo() } if u.isMessageSet { - return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) + return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) } var reqMask uint64 // bitmask of required fields we've seen. var errLater error @@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break } + } - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) } } + } // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) @@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte { // If there is an error, it returns 0,0. func decodeVarint(b []byte) (uint64, int) { var x, y uint64 - if len(b) <= 0 { + if len(b) == 0 { goto bad } x = uint64(b[0]) diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index e3c56d3ffa..78ee523349 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// source: google/protobuf/any.proto -package any // import "github.com/golang/protobuf/ptypes/any" +package any -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. @@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // } // type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -118,6 +121,10 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
// @@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_any_744b9ca530f228db, []int{0} + return fileDescriptor_b53526c13ae22eb4, []int{0} } + func (*Any) XXX_WellKnownType() string { return "Any" } + func (m *Any) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Any.Unmarshal(m, b) } func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Any.Marshal(b, m, deterministic) } -func (dst *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(dst, src) +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) } func (m *Any) XXX_Size() int { return xxx_messageInfo_Any.Size(m) @@ -172,9 +181,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } -var fileDescriptor_any_744b9ca530f228db = []byte{ +var fileDescriptor_b53526c13ae22eb4 = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index c748667623..4932942558 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -120,17 +120,18 @@ option objc_class_prefix = "GPB"; // } // message Any { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -139,6 +140,10 @@ message Any { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
// diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 65cb0f8eb5..26d1ca2fb5 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { - d += time.Duration(p.Nanos) + d += time.Duration(p.Nanos) * time.Nanosecond if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index a7beb2c414..0d681ee21a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -package duration // import "github.com/golang/protobuf/ptypes/duration" +package duration -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond @@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_duration_e7d612259e3f0613, []int{0} + return fileDescriptor_23597b2ebd7ac6c5, []int{0} } + func (*Duration) XXX_WellKnownType() string { return "Duration" } + func (m *Duration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Duration.Unmarshal(m, b) } func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Duration.Marshal(b, m, deterministic) } -func (dst *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(dst, src) +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) } func (m *Duration) XXX_Size() int { return xxx_messageInfo_Duration.Size(m) @@ -138,11 +142,9 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { - proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) -} +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } -var fileDescriptor_duration_e7d612259e3f0613 = []byte{ +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git 
a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 47f10dbc2c..8da0df01ac 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), } if err := validateTimestamp(ts); err != nil { return nil, err diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 8e76ae9763..31cd846de9 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" +package timestamp -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -16,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at @@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -92,8 +96,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. 
// // type Timestamp struct { @@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} + return fileDescriptor_292007bbfe81227e, []int{0} } + func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + func (m *Timestamp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timestamp.Unmarshal(m, b) } func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) } -func (dst *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(dst, src) +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) } func (m *Timestamp) XXX_Size() int { return xxx_messageInfo_Timestamp.Size(m) @@ -154,11 +160,9 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { - proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) -} +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } -var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ +var fileDescriptor_292007bbfe81227e = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index 06750ab1f1..eafb3fa03a 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -103,7 +103,9 @@ option objc_class_prefix = "GPB"; // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -114,8 +116,8 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. 
// // message Timestamp { diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go deleted file mode 100644 index 0f0fa837f6..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/wrappers.proto - -package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -type DoubleValue struct { - // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleValue) Reset() { *m = DoubleValue{} } -func (m *DoubleValue) String() string { return proto.CompactTextString(m) } -func (*DoubleValue) ProtoMessage() {} -func (*DoubleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} -} -func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } -func (m *DoubleValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleValue.Unmarshal(m, b) -} -func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) -} -func (dst *DoubleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleValue.Merge(dst, src) -} -func (m *DoubleValue) XXX_Size() int { - return xxx_messageInfo_DoubleValue.Size(m) -} -func (m *DoubleValue) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleValue proto.InternalMessageInfo - -func (m *DoubleValue) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -type FloatValue struct { - // The float value. 
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FloatValue) Reset() { *m = FloatValue{} } -func (m *FloatValue) String() string { return proto.CompactTextString(m) } -func (*FloatValue) ProtoMessage() {} -func (*FloatValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} -} -func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } -func (m *FloatValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FloatValue.Unmarshal(m, b) -} -func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) -} -func (dst *FloatValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatValue.Merge(dst, src) -} -func (m *FloatValue) XXX_Size() int { - return xxx_messageInfo_FloatValue.Size(m) -} -func (m *FloatValue) XXX_DiscardUnknown() { - xxx_messageInfo_FloatValue.DiscardUnknown(m) -} - -var xxx_messageInfo_FloatValue proto.InternalMessageInfo - -func (m *FloatValue) GetValue() float32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -type Int64Value struct { - // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Int64Value) Reset() { *m = Int64Value{} } -func (m *Int64Value) String() string { return proto.CompactTextString(m) } -func (*Int64Value) ProtoMessage() {} -func (*Int64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} -} -func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } -func (m *Int64Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Int64Value.Unmarshal(m, b) -} -func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) -} -func (dst *Int64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int64Value.Merge(dst, src) -} -func (m *Int64Value) XXX_Size() int { - return xxx_messageInfo_Int64Value.Size(m) -} -func (m *Int64Value) XXX_DiscardUnknown() { - xxx_messageInfo_Int64Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Int64Value proto.InternalMessageInfo - -func (m *Int64Value) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -type UInt64Value struct { - // The uint64 value. 
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UInt64Value) Reset() { *m = UInt64Value{} } -func (m *UInt64Value) String() string { return proto.CompactTextString(m) } -func (*UInt64Value) ProtoMessage() {} -func (*UInt64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} -} -func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } -func (m *UInt64Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UInt64Value.Unmarshal(m, b) -} -func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) -} -func (dst *UInt64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt64Value.Merge(dst, src) -} -func (m *UInt64Value) XXX_Size() int { - return xxx_messageInfo_UInt64Value.Size(m) -} -func (m *UInt64Value) XXX_DiscardUnknown() { - xxx_messageInfo_UInt64Value.DiscardUnknown(m) -} - -var xxx_messageInfo_UInt64Value proto.InternalMessageInfo - -func (m *UInt64Value) GetValue() uint64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -type Int32Value struct { - // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Int32Value) Reset() { *m = Int32Value{} } -func (m *Int32Value) String() string { return proto.CompactTextString(m) } -func (*Int32Value) ProtoMessage() {} -func (*Int32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} -} -func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } -func (m *Int32Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Int32Value.Unmarshal(m, b) -} -func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) -} -func (dst *Int32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int32Value.Merge(dst, src) -} -func (m *Int32Value) XXX_Size() int { - return xxx_messageInfo_Int32Value.Size(m) -} -func (m *Int32Value) XXX_DiscardUnknown() { - xxx_messageInfo_Int32Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Int32Value proto.InternalMessageInfo - -func (m *Int32Value) GetValue() int32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -type UInt32Value struct { - // The uint32 value. 
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UInt32Value) Reset() { *m = UInt32Value{} } -func (m *UInt32Value) String() string { return proto.CompactTextString(m) } -func (*UInt32Value) ProtoMessage() {} -func (*UInt32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} -} -func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } -func (m *UInt32Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UInt32Value.Unmarshal(m, b) -} -func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) -} -func (dst *UInt32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt32Value.Merge(dst, src) -} -func (m *UInt32Value) XXX_Size() int { - return xxx_messageInfo_UInt32Value.Size(m) -} -func (m *UInt32Value) XXX_DiscardUnknown() { - xxx_messageInfo_UInt32Value.DiscardUnknown(m) -} - -var xxx_messageInfo_UInt32Value proto.InternalMessageInfo - -func (m *UInt32Value) GetValue() uint32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -type BoolValue struct { - // The bool value. - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolValue) Reset() { *m = BoolValue{} } -func (m *BoolValue) String() string { return proto.CompactTextString(m) } -func (*BoolValue) ProtoMessage() {} -func (*BoolValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} -} -func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } -func (m *BoolValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolValue.Unmarshal(m, b) -} -func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) -} -func (dst *BoolValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolValue.Merge(dst, src) -} -func (m *BoolValue) XXX_Size() int { - return xxx_messageInfo_BoolValue.Size(m) -} -func (m *BoolValue) XXX_DiscardUnknown() { - xxx_messageInfo_BoolValue.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolValue proto.InternalMessageInfo - -func (m *BoolValue) GetValue() bool { - if m != nil { - return m.Value - } - return false -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -type StringValue struct { - // The string value. 
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringValue) Reset() { *m = StringValue{} } -func (m *StringValue) String() string { return proto.CompactTextString(m) } -func (*StringValue) ProtoMessage() {} -func (*StringValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} -} -func (*StringValue) XXX_WellKnownType() string { return "StringValue" } -func (m *StringValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringValue.Unmarshal(m, b) -} -func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) -} -func (dst *StringValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringValue.Merge(dst, src) -} -func (m *StringValue) XXX_Size() int { - return xxx_messageInfo_StringValue.Size(m) -} -func (m *StringValue) XXX_DiscardUnknown() { - xxx_messageInfo_StringValue.DiscardUnknown(m) -} - -var xxx_messageInfo_StringValue proto.InternalMessageInfo - -func (m *StringValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -type BytesValue struct { - // The bytes value. - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesValue) Reset() { *m = BytesValue{} } -func (m *BytesValue) String() string { return proto.CompactTextString(m) } -func (*BytesValue) ProtoMessage() {} -func (*BytesValue) Descriptor() ([]byte, []int) { - return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} -} -func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } -func (m *BytesValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesValue.Unmarshal(m, b) -} -func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) -} -func (dst *BytesValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesValue.Merge(dst, src) -} -func (m *BytesValue) XXX_Size() int { - return xxx_messageInfo_BytesValue.Size(m) -} -func (m *BytesValue) XXX_DiscardUnknown() { - xxx_messageInfo_BytesValue.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesValue proto.InternalMessageInfo - -func (m *BytesValue) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") - proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") - proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") - proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") - proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") - proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") - proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") - proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") - proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") -} - -func init() { - proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) -} - -var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ - // 259 bytes of a gzipped FileDescriptorProto 
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, - 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, - 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, - 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, - 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, - 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, - 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, - 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, - 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, - 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, - 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, - 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, - 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, - 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, - 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, - 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto deleted file mode 100644 index 01947639ac..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +++ /dev/null @@ -1,118 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/wrappers"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "WrappersProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -message DoubleValue { - // The double value. - double value = 1; -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -message FloatValue { - // The float value. - float value = 1; -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -message Int64Value { - // The int64 value. - int64 value = 1; -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -message UInt64Value { - // The uint64 value. - uint64 value = 1; -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -message Int32Value { - // The int32 value. - int32 value = 1; -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -message UInt32Value { - // The uint32 value. - uint32 value = 1; -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -message BoolValue { - // The bool value. - bool value = 1; -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -message StringValue { - // The string value. - string value = 1; -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -message BytesValue { - // The bytes value. - bytes value = 1; -} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d973..0000000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4dacd..0000000000 --- a/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6ff062f9bb..0000000000 --- a/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. 
-package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. -func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 
'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. -// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. 
-func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. -func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. -type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. 
-// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. -func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. 
-func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. -func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. 
Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). -func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. 
-func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. -func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. 
-func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. -func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/vendor/github.com/google/btree/btree_mem.go b/vendor/github.com/google/btree/btree_mem.go deleted file mode 100644 index cb95b7fa1b..0000000000 --- a/vendor/github.com/google/btree/btree_mem.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// This binary compares memory usage between btree and gollrb. 
-package main - -import ( - "flag" - "fmt" - "math/rand" - "runtime" - "time" - - "github.com/google/btree" - "github.com/petar/GoLLRB/llrb" -) - -var ( - size = flag.Int("size", 1000000, "size of the tree to build") - degree = flag.Int("degree", 8, "degree of btree") - gollrb = flag.Bool("llrb", false, "use llrb instead of btree") -) - -func main() { - flag.Parse() - vals := rand.Perm(*size) - var t, v interface{} - v = vals - var stats runtime.MemStats - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- BEFORE ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - start := time.Now() - if *gollrb { - tr := llrb.New() - for _, v := range vals { - tr.ReplaceOrInsert(llrb.Int(v)) - } - t = tr // keep it around - } else { - tr := btree.New(*degree) - for _, v := range vals { - tr.ReplaceOrInsert(btree.Int(v)) - } - t = tr // keep it around - } - fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) - fmt.Println("-------- AFTER ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- AFTER GC ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - if t == v { - fmt.Println("to make sure vals and tree aren't GC'd") - } -} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod deleted file mode 100644 index 8ec4fe9e97..0000000000 --- a/vendor/github.com/google/gofuzz/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/google/gofuzz - -go 1.12 diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60ba..0000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f13..0000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6bc..0000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268d9..0000000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11f16..0000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d30..0000000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9af8..0000000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79d4..0000000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b174616315..0000000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. 
It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6c0e..0000000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b061..0000000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. 
-func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78edc90..0000000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddbd6..0000000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54db3..0000000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. 
-func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cdc8..0000000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. 
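The Scan/Value pair above is what lets a UUID be used directly as a database/sql column type. A minimal sketch of that contract without an actual database driver, assuming the upstream github.com/google/uuid package; the literal UUID string is illustrative.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var id uuid.UUID

	// Scan accepts string or []byte columns; empty values leave the UUID untouched.
	if err := id.Scan("f47ac10b-58cc-4372-8567-0e02b2c3d479"); err != nil {
		panic(err)
	}

	// Value returns the canonical string form for the driver to store.
	v, _ := id.Value()
	fmt.Println(v) // f47ac10b-58cc-4372-8567-0e02b2c3d479
}
```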
(section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c73780..0000000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. 
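The Time/ClockSequence accessors above can recover the timestamp embedded in a Version 1 UUID. A brief sketch of that round trip, under the same import-path assumption as the other examples.

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewUUID() // Version 1 UUIDs encode their creation time
	if err != nil {
		panic(err)
	}

	// Recover the embedded timestamp and clock sequence from the UUID itself.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC(), u.ClockSequence())
}
```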
-func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404cc52..0000000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
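Parse, shown above, accepts four input encodings. A quick sketch demonstrating all of them decoding to the same value; the UUID literal is arbitrary.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	forms := []string{
		"f47ac10b-58cc-4372-8567-0e02b2c3d479",          // canonical
		"urn:uuid:f47ac10b-58cc-4372-8567-0e02b2c3d479", // RFC 2141 URN
		"{f47ac10b-58cc-4372-8567-0e02b2c3d479}",        // Microsoft braces
		"f47ac10b58cc437285670e02b2c3d479",              // raw hex
	}
	for _, s := range forms {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // all four decode to the same UUID, err == nil
	}
}
```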
-func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. 
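MustParse and URN, defined above, are convenient for package-level constants and for RFC 2141 output. A short illustrative sketch; the namespace value here is simply the well-known DNS namespace UUID.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// MustParse panics on malformed input, which makes it suitable for
// package-level variables whose literals are known to be valid.
var dnsNamespace = uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")

func main() {
	fmt.Println(dnsNamespace.URN())
	// urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8
}
```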
-// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac654..0000000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 84af91c9f5..0000000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. 
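New and NewRandom, covered above and below, are the common entry points for random (Version 4) UUIDs. A minimal sketch showing both forms and the version/variant bits they set.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u := uuid.New() // panics only if crypto/rand fails

	// Version 4, RFC 4122 variant: both are set by NewRandom itself.
	fmt.Println(u.Version(), u.Variant()) // VERSION_4 RFC4122

	// The error-returning form, for callers that prefer not to panic.
	if v, err := uuid.NewRandom(); err == nil {
		fmt.Println(v)
	}
}
```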
-func NewRandom() (UUID, error) { - var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index b5ffbe03d8..0000000000 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316beb0c..0000000000 --- a/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 09c9e7c173..0000000000 --- a/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,25 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). - -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. 
-- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. -- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). - -License -------- - -- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129d82..0000000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. -func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index f6a2ec4a53..0000000000 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). 
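The diskcache backend listed above plugs straight into the httpcache Transport. A small sketch of wiring the two together, assuming both upstream packages are available; the cache directory and URL are placeholders.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// Persist cached responses under /tmp/httpcache across process restarts.
	tp := httpcache.NewTransport(diskcache.New("/tmp/httpcache"))
	client := tp.Client() // a plain *http.Client; usable anywhere one is expected

	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status, resp.Header.Get(httpcache.XFromCache))
}
```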
-// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. -func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. -func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { - cachedVal, ok := c.Get(cacheKey(req)) - if !ok { - return - } - - b := bytes.NewBuffer(cachedVal) - return http.ReadResponse(bufio.NewReader(b), req) -} - -// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. -type MemoryCache struct { - mu sync.RWMutex - items map[string][]byte -} - -// Get returns the []byte representation of the response and true if present, false if not -func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { - c.mu.RLock() - resp, ok = c.items[key] - c.mu.RUnlock() - return resp, ok -} - -// Set saves response resp to the cache with key -func (c *MemoryCache) Set(key string, resp []byte) { - c.mu.Lock() - c.items[key] = resp - c.mu.Unlock() -} - -// Delete removes key from the cache -func (c *MemoryCache) Delete(key string) { - c.mu.Lock() - delete(c.items, key) - c.mu.Unlock() -} - -// NewMemoryCache returns a new Cache that will store items in an in-memory map -func NewMemoryCache() *MemoryCache { - c := &MemoryCache{items: map[string][]byte{}} - return c -} - -// Transport is an implementation of http.RoundTripper that will return values from a cache -// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) -// to repeated requests allowing servers to return 304 / Not Modified -type Transport struct { - // The RoundTripper interface actually used to make requests - // If nil, http.DefaultTransport is used - Transport http.RoundTripper - Cache Cache - // If true, responses returned from the cache will be given an extra header, X-From-Cache - MarkCachedResponses bool -} - -// NewTransport returns a new Transport with the -// provided Cache implementation and MarkCachedResponses set to true -func NewTransport(c Cache) *Transport { - return &Transport{Cache: c, MarkCachedResponses: true} -} - -// Client returns an *http.Client that caches responses. 
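The three-method Cache interface defined above is the whole storage contract, so any key/value store can back the Transport. A toy backend as a sketch; the "v1|" key prefix and the type name are invented for illustration.

```go
package main

import (
	"sync"

	"github.com/gregjones/httpcache"
)

// prefixCache is a minimal Cache backend: Get/Set/Delete are all the
// Transport needs from its storage layer.
type prefixCache struct {
	mu    sync.RWMutex
	items map[string][]byte
}

func (c *prefixCache) Get(key string) ([]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	b, ok := c.items["v1|"+key]
	return b, ok
}

func (c *prefixCache) Set(key string, responseBytes []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items["v1|"+key] = responseBytes
}

func (c *prefixCache) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.items, "v1|"+key)
}

func main() {
	_ = httpcache.NewTransport(&prefixCache{items: map[string][]byte{}})
}
```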
-func (t *Transport) Client() *http.Client { - return &http.Client{Transport: t} -} - -// varyMatches will return false unless all of the cached values for the headers listed in Vary -// match the new request -func varyMatches(cachedResp *http.Response, req *http.Request) bool { - for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { - header = http.CanonicalHeaderKey(header) - if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { - return false - } - } - return true -} - -// RoundTrip takes a Request and returns a Response -// -// If there is a fresh Response already in cache, then it will be returned without connecting to -// the server. -// -// If there is a stale Response, then any validators it contains will be set on the new request -// to give the server a chance to respond with NotModified. If this happens, then the cached Response -// will be returned. -func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = 
http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. -func Date(respHeaders http.Header) (date time.Time, err error) { - dateHeader := respHeaders.Get("date") - if dateHeader == "" { - err = ErrNoDateHeader - return - } - - return time.Parse(time.RFC1123, dateHeader) -} - -type realClock struct{} - -func (c *realClock) since(d time.Time) time.Duration { - return time.Since(d) -} - -type timer interface { - since(d time.Time) time.Duration -} - -var clock timer = &realClock{} - -// getFreshness will return one of fresh/stale/transparent based on the cache-control -// values of the request and the response -// -// fresh indicates the response can be returned -// stale indicates that the response needs validating before it is returned -// transparent indicates the response should not be used to fulfil the request -// -// Because this is only a private cache, 'public' and 'private' in cache-control aren't -// signficant. Similarly, smax-age isn't used. -func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - if _, ok := reqCacheControl["no-cache"]; ok { - return transparent - } - if _, ok := respCacheControl["no-cache"]; ok { - return stale - } - if _, ok := reqCacheControl["only-if-cached"]; ok { - return fresh - } - - date, err := Date(respHeaders) - if err != nil { - return stale - } - currentAge := clock.since(date) - - var lifetime time.Duration - var zeroDuration time.Duration - - // If a response includes both an Expires header and a max-age directive, - // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } else { - expiresHeader := respHeaders.Get("Expires") - if expiresHeader != "" { - expires, err := time.Parse(time.RFC1123, expiresHeader) - if err != nil { - lifetime = zeroDuration - } else { - lifetime = expires.Sub(date) - } - } - } - - if maxAge, ok := reqCacheControl["max-age"]; ok { - // the client is willing to accept a response whose age is no greater than the specified time in seconds - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } - if minfresh, ok := reqCacheControl["min-fresh"]; ok { - // the client wants a response that will still be fresh for at least the specified number of seconds. 
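To make the RoundTrip/freshness logic above concrete: a fresh max-age response is served from cache without touching the server again. The sketch below is illustrative only — the handler and hit counter are invented, and it relies on net/http's server adding the Date header that the freshness check needs.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"

	"github.com/gregjones/httpcache"
)

func main() {
	hits := 0
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hits++
		w.Header().Set("Cache-Control", "max-age=60")
		fmt.Fprint(w, "hello")
	}))
	defer srv.Close()

	client := httpcache.NewMemoryCacheTransport().Client()

	get := func() *http.Response {
		resp, err := client.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		// The response is only written to the cache once the body has been
		// fully read, so drain and close it.
		_, _ = ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		return resp
	}

	first, second := get(), get()
	fmt.Println(hits)                                    // 1: the second GET never hit the server
	fmt.Println(first.Header.Get(httpcache.XFromCache))  // ""
	fmt.Println(second.Header.Get(httpcache.XFromCache)) // "1"
}
```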
- minfreshDuration, err := time.ParseDuration(minfresh + "s") - if err == nil { - currentAge = time.Duration(currentAge + minfreshDuration) - } - } - - if maxstale, ok := reqCacheControl["max-stale"]; ok { - // Indicates that the client is willing to accept a response that has exceeded its expiration time. - // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded - // its expiration time by no more than the specified number of seconds. - // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. - // - // Responses served only because of a max-stale value are supposed to have a Warning header added to them, - // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different - // return-value available here. - if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. 
-// The clone is a shallow copy of the struct and its Header map. -// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. -func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore new file mode 100644 index 0000000000..6d9953c3c6 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.gitignore @@ -0,0 +1,3 @@ +.test +.go + diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml new file mode 100644 index 0000000000..9cf8bb7fc5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/.travis.yml @@ -0,0 +1,18 @@ +language: go + +script: + - go test -race -v ./... 
+ +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/fsnotify.v1 + - go get gopkg.in/tomb.v1 diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md new file mode 100644 index 0000000000..422790c073 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/CHANGES.md @@ -0,0 +1,63 @@ +# API v1 (gopkg.in/hpcloud/tail.v1) + +## April, 2016 + +* Migrated to godep, as depman is not longer supported +* Introduced golang vendoring feature +* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file + +## July, 2015 + +* Fix inotify watcher leak; remove `Cleanup` (#51) + +# API v0 (gopkg.in/hpcloud/tail.v0) + +## June, 2015 + +* Don't return partial lines (PR #40) +* Use stable version of fsnotify (#46) + +## July, 2014 + +* Fix tail for Windows (PR #36) + +## May, 2014 + +* Improved rate limiting using leaky bucket (PR #29) +* Fix odd line splitting (PR #30) + +## Apr, 2014 + +* LimitRate now discards read buffer (PR #28) +* allow reading of longer lines if MaxLineSize is unset (PR #24) +* updated deps.json to latest fsnotify (441bbc86b1) + +## Feb, 2014 + +* added `Config.Logger` to suppress library logging + +## Nov, 2013 + +* add Cleanup to remove leaky inotify watches (PR #20) + +## Aug, 2013 + +* redesigned Location field (PR #12) +* add tail.Tell (PR #14) + +## July, 2013 + +* Rate limiting (PR #10) + +## May, 2013 + +* Detect file deletions/renames in polling file watcher (PR #1) +* Detect file truncation +* Fix potential race condition when reopening the file (issue 5) +* Fix potential blocking of `tail.Stop` (issue 4) +* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop +* Support Follow=false + +## Feb, 2013 + +* Initial open source release diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile new file mode 100644 index 0000000000..cd297b940a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Dockerfile @@ -0,0 +1,19 @@ +FROM golang + +RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/ +ADD . $GOPATH/src/github.com/hpcloud/tail/ + +# expecting to fetch dependencies successfully. +RUN go get -v github.com/hpcloud/tail + +# expecting to run the test successfully. +RUN go test -v github.com/hpcloud/tail + +# expecting to install successfully +RUN go install -v github.com/hpcloud/tail +RUN go install -v github.com/hpcloud/tail/cmd/gotail + +RUN $GOPATH/bin/gotail -h || true + +ENV PATH $GOPATH/bin:$PATH +CMD ["gotail"] diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt new file mode 100644 index 0000000000..818d802a59 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/LICENSE.txt @@ -0,0 +1,21 @@ +# The MIT License (MIT) + +# © Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile new file mode 100644 index 0000000000..6591b24fc1 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/Makefile @@ -0,0 +1,11 @@ +default: test + +test: *.go + go test -v -race ./... + +fmt: + gofmt -w . + +# Run the test in an isolated environment. +fulltest: + docker build -t hpcloud/tail . diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md new file mode 100644 index 0000000000..fb7fbc26c6 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/README.md @@ -0,0 +1,28 @@ +[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail) +[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail) + +# Go package for tail-ing files + +A Go package striving to emulate the features of the BSD `tail` program. + +```Go +t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true}) +for line := range t.Lines { + fmt.Println(line.Text) +} +``` + +See [API documentation](http://godoc.org/github.com/hpcloud/tail). + +## Log rotation + +Tail comes with full support for truncation/move detection as it is +designed to work with log rotation tools. + +## Installing + + go get github.com/hpcloud/tail/... + +## Windows support + +This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support. diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml new file mode 100644 index 0000000000..d370055b6f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/appveyor.yml @@ -0,0 +1,11 @@ +version: 0.{build} +skip_tags: true +cache: C:\Users\appveyor\AppData\Local\NuGet\Cache +build_script: +- SET GOPATH=c:\workspace +- go test -v -race ./... +test: off +clone_folder: c:\workspace\src\github.com\hpcloud\tail +branches: + only: + - master diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/hpcloud/tail/ratelimiter/Licence similarity index 96% rename from vendor/github.com/dgrijalva/jwt-go/LICENSE rename to vendor/github.com/hpcloud/tail/ratelimiter/Licence index df83a9c2f0..434aab19f1 100644 --- a/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ b/vendor/github.com/hpcloud/tail/ratelimiter/Licence @@ -1,8 +1,7 @@ -Copyright (c) 2012 Dave Grijalva +Copyright (C) 2013 99designs Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go new file mode 100644 index 0000000000..358b69e7f5 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go @@ -0,0 +1,97 @@ +// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends. +package ratelimiter + +import ( + "time" +) + +type LeakyBucket struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time + Now func() time.Time +} + +func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket { + bucket := LeakyBucket{ + Size: size, + Fill: 0, + LeakInterval: leakInterval, + Now: time.Now, + Lastupdate: time.Now(), + } + + return &bucket +} + +func (b *LeakyBucket) updateFill() { + now := b.Now() + if b.Fill > 0 { + elapsed := now.Sub(b.Lastupdate) + + b.Fill -= float64(elapsed) / float64(b.LeakInterval) + if b.Fill < 0 { + b.Fill = 0 + } + } + b.Lastupdate = now +} + +func (b *LeakyBucket) Pour(amount uint16) bool { + b.updateFill() + + var newfill float64 = b.Fill + float64(amount) + + if newfill > float64(b.Size) { + return false + } + + b.Fill = newfill + + return true +} + +// The time at which this bucket will be completely drained +func (b *LeakyBucket) DrainedAt() time.Time { + return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval))) +} + +// The duration until this bucket is completely drained +func (b *LeakyBucket) TimeToDrain() time.Duration { + return b.DrainedAt().Sub(b.Now()) +} + +func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration { + return b.Now().Sub(b.Lastupdate) +} + +type LeakyBucketSer struct { + Size uint16 + Fill float64 + LeakInterval time.Duration // time.Duration for 1 unit of size to leak + Lastupdate time.Time +} + +func (b *LeakyBucket) Serialise() *LeakyBucketSer { + bucket := LeakyBucketSer{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + } + + return &bucket +} + +func (b *LeakyBucketSer) DeSerialise() *LeakyBucket { + bucket := LeakyBucket{ + Size: b.Size, + Fill: b.Fill, + LeakInterval: b.LeakInterval, + Lastupdate: b.Lastupdate, + Now: time.Now, + } + + return &bucket +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go new file mode 100644 index 0000000000..8f6a5784a9 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go @@ -0,0 +1,58 @@ +package ratelimiter + +import ( + "errors" + "time" +) + +const GC_SIZE int = 100 + +type Memory struct { + store map[string]LeakyBucket + lastGCCollected time.Time +} + +func NewMemory() *Memory { + m := new(Memory) + m.store = make(map[string]LeakyBucket) + m.lastGCCollected = time.Now() + return m +} + +func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) { + + bucket, ok := m.store[key] + if !ok { + return nil, errors.New("miss") + } + + return &bucket, nil +} + +func (m *Memory) SetBucketFor(key string, 
bucket LeakyBucket) error { + + if len(m.store) > GC_SIZE { + m.GarbageCollect() + } + + m.store[key] = bucket + + return nil +} + +func (m *Memory) GarbageCollect() { + now := time.Now() + + // rate limit GC to once per minute + if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() { + + for key, bucket := range m.store { + // if the bucket is drained, then GC + if bucket.DrainedAt().Unix() > now.Unix() { + delete(m.store, key) + } + } + + m.lastGCCollected = now + } +} diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go new file mode 100644 index 0000000000..89b2fe8826 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go @@ -0,0 +1,6 @@ +package ratelimiter + +type Storage interface { + GetBucketFor(string) (*LeakyBucket, error) + SetBucketFor(string, LeakyBucket) error +} diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go new file mode 100644 index 0000000000..2d252d6057 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail.go @@ -0,0 +1,438 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package tail + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "sync" + "time" + + "github.com/hpcloud/tail/ratelimiter" + "github.com/hpcloud/tail/util" + "github.com/hpcloud/tail/watch" + "gopkg.in/tomb.v1" +) + +var ( + ErrStop = fmt.Errorf("tail should now stop") +) + +type Line struct { + Text string + Time time.Time + Err error // Error from tail +} + +// NewLine returns a Line with present time. +func NewLine(text string) *Line { + return &Line{text, time.Now(), nil} +} + +// SeekInfo represents arguments to `os.Seek` +type SeekInfo struct { + Offset int64 + Whence int // os.SEEK_* +} + +type logger interface { + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + Fatalln(v ...interface{}) + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) + Panicln(v ...interface{}) + Print(v ...interface{}) + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +// Config is used to specify how a file must be tailed. +type Config struct { + // File-specifc + Location *SeekInfo // Seek to this location before tailing + ReOpen bool // Reopen recreated files (tail -F) + MustExist bool // Fail early if the file does not exist + Poll bool // Poll for file changes instead of using inotify + Pipe bool // Is a named pipe (mkfifo) + RateLimiter *ratelimiter.LeakyBucket + + // Generic IO + Follow bool // Continue looking for new lines (tail -f) + MaxLineSize int // If non-zero, split longer lines into multiple lines + + // Logger, when nil, is set to tail.DefaultLogger + // To disable logging: set field to tail.DiscardingLogger + Logger logger +} + +type Tail struct { + Filename string + Lines chan *Line + Config + + file *os.File + reader *bufio.Reader + + watcher watch.FileWatcher + changes *watch.FileChanges + + tomb.Tomb // provides: Done, Kill, Dying + + lk sync.Mutex +} + +var ( + // DefaultLogger is used when Config.Logger == nil + DefaultLogger = log.New(os.Stderr, "", log.LstdFlags) + // DiscardingLogger can be used to disable logging output + DiscardingLogger = log.New(ioutil.Discard, "", 0) +) + +// TailFile begins tailing the file. Output stream is made available +// via the `Tail.Lines` channel. 
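The leaky-bucket rate limiter added above is what tail.Config.RateLimiter expects. A standalone sketch of its fill/overflow behaviour; the size and interval are arbitrary example values.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	// A bucket of size 3 that leaks one unit every 100ms.
	bucket := ratelimiter.NewLeakyBucket(3, 100*time.Millisecond)

	for i := 0; i < 5; i++ {
		fmt.Println(i, bucket.Pour(1)) // pours 4 and 5 overflow and return false
	}

	fmt.Println(bucket.TimeToDrain()) // roughly 300ms until the bucket is empty again
}
```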
To handle errors during tailing, +// invoke the `Wait` or `Err` method after finishing reading from the +// `Lines` channel. +func TailFile(filename string, config Config) (*Tail, error) { + if config.ReOpen && !config.Follow { + util.Fatal("cannot set ReOpen without Follow.") + } + + t := &Tail{ + Filename: filename, + Lines: make(chan *Line), + Config: config, + } + + // when Logger was not specified in config, use default logger + if t.Logger == nil { + t.Logger = log.New(os.Stderr, "", log.LstdFlags) + } + + if t.Poll { + t.watcher = watch.NewPollingFileWatcher(filename) + } else { + t.watcher = watch.NewInotifyFileWatcher(filename) + } + + if t.MustExist { + var err error + t.file, err = OpenFile(t.Filename) + if err != nil { + return nil, err + } + } + + go t.tailFileSync() + + return t, nil +} + +// Return the file's current position, like stdio's ftell(). +// But this value is not very accurate. +// it may readed one line in the chan(tail.Lines), +// so it may lost one line. +func (tail *Tail) Tell() (offset int64, err error) { + if tail.file == nil { + return + } + offset, err = tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return + } + + tail.lk.Lock() + defer tail.lk.Unlock() + if tail.reader == nil { + return + } + + offset -= int64(tail.reader.Buffered()) + return +} + +// Stop stops the tailing activity. +func (tail *Tail) Stop() error { + tail.Kill(nil) + return tail.Wait() +} + +// StopAtEOF stops tailing as soon as the end of the file is reached. +func (tail *Tail) StopAtEOF() error { + tail.Kill(errStopAtEOF) + return tail.Wait() +} + +var errStopAtEOF = errors.New("tail: stop at eof") + +func (tail *Tail) close() { + close(tail.Lines) + tail.closeFile() +} + +func (tail *Tail) closeFile() { + if tail.file != nil { + tail.file.Close() + tail.file = nil + } +} + +func (tail *Tail) reopen() error { + tail.closeFile() + for { + var err error + tail.file, err = OpenFile(tail.Filename) + if err != nil { + if os.IsNotExist(err) { + tail.Logger.Printf("Waiting for %s to appear...", tail.Filename) + if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil { + if err == tomb.ErrDying { + return err + } + return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err) + } + continue + } + return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err) + } + break + } + return nil +} + +func (tail *Tail) readLine() (string, error) { + tail.lk.Lock() + line, err := tail.reader.ReadString('\n') + tail.lk.Unlock() + if err != nil { + // Note ReadString "returns the data read before the error" in + // case of an error, including EOF, so we return it as is. The + // caller is expected to process it if err is EOF. + return line, err + } + + line = strings.TrimRight(line, "\n") + + return line, err +} + +func (tail *Tail) tailFileSync() { + defer tail.Done() + defer tail.close() + + if !tail.MustExist { + // deferred first open. + err := tail.reopen() + if err != nil { + if err != tomb.ErrDying { + tail.Kill(err) + } + return + } + } + + // Seek to requested location on first open of the file. + if tail.Location != nil { + _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence) + tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location) + if err != nil { + tail.Killf("Seek error on %s: %s", tail.Filename, err) + return + } + } + + tail.openReader() + + var offset int64 = 0 + var err error + + // Read line by line. 
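TailFile and the Config struct added above are the package's main entry point. A minimal usage sketch with `tail -F` semantics (Follow plus ReOpen to survive log rotation); the log path is a hypothetical example, not one used by this project.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hpcloud/tail"
)

func main() {
	t, err := tail.TailFile("/var/log/cloud-init-output.log", tail.Config{
		Follow: true, // keep waiting for new lines (tail -f)
		ReOpen: true, // reopen the file if it is rotated or recreated (tail -F)
	})
	if err != nil {
		log.Fatal(err)
	}

	for line := range t.Lines {
		if line.Err != nil {
			log.Printf("tail error: %v", line.Err)
			continue
		}
		fmt.Println(line.Text)
	}

	// Wait reports the error, if any, that terminated the tail.
	if err := t.Wait(); err != nil {
		log.Fatal(err)
	}
}
```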
+ for { + // do not seek in named pipes + if !tail.Pipe { + // grab the position in case we need to back up in the event of a half-line + offset, err = tail.Tell() + if err != nil { + tail.Kill(err) + return + } + } + + line, err := tail.readLine() + + // Process `line` even if err is EOF. + if err == nil { + cooloff := !tail.sendLine(line) + if cooloff { + // Wait a second before seeking till the end of + // file when rate limit is reached. + msg := fmt.Sprintf( + "Too much log activity; waiting a second " + + "before resuming tailing") + tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)} + select { + case <-time.After(time.Second): + case <-tail.Dying(): + return + } + if err := tail.seekEnd(); err != nil { + tail.Kill(err) + return + } + } + } else if err == io.EOF { + if !tail.Follow { + if line != "" { + tail.sendLine(line) + } + return + } + + if tail.Follow && line != "" { + // this has the potential to never return the last line if + // it's not followed by a newline; seems a fair trade here + err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0}) + if err != nil { + tail.Kill(err) + return + } + } + + // When EOF is reached, wait for more data to become + // available. Wait strategy is based on the `tail.watcher` + // implementation (inotify or polling). + err := tail.waitForChanges() + if err != nil { + if err != ErrStop { + tail.Kill(err) + } + return + } + } else { + // non-EOF error + tail.Killf("Error reading %s: %s", tail.Filename, err) + return + } + + select { + case <-tail.Dying(): + if tail.Err() == errStopAtEOF { + continue + } + return + default: + } + } +} + +// waitForChanges waits until the file has been appended, deleted, +// moved or truncated. When moved or deleted - the file will be +// reopened if ReOpen is true. Truncated files are always reopened. +func (tail *Tail) waitForChanges() error { + if tail.changes == nil { + pos, err := tail.file.Seek(0, os.SEEK_CUR) + if err != nil { + return err + } + tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos) + if err != nil { + return err + } + } + + select { + case <-tail.changes.Modified: + return nil + case <-tail.changes.Deleted: + tail.changes = nil + if tail.ReOpen { + // XXX: we must not log from a library. 
+ tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened %s", tail.Filename) + tail.openReader() + return nil + } else { + tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename) + return ErrStop + } + case <-tail.changes.Truncated: + // Always reopen truncated files (Follow is true) + tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename) + if err := tail.reopen(); err != nil { + return err + } + tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename) + tail.openReader() + return nil + case <-tail.Dying(): + return ErrStop + } + panic("unreachable") +} + +func (tail *Tail) openReader() { + if tail.MaxLineSize > 0 { + // add 2 to account for newline characters + tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2) + } else { + tail.reader = bufio.NewReader(tail.file) + } +} + +func (tail *Tail) seekEnd() error { + return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END}) +} + +func (tail *Tail) seekTo(pos SeekInfo) error { + _, err := tail.file.Seek(pos.Offset, pos.Whence) + if err != nil { + return fmt.Errorf("Seek error on %s: %s", tail.Filename, err) + } + // Reset the read buffer whenever the file is re-seek'ed + tail.reader.Reset(tail.file) + return nil +} + +// sendLine sends the line(s) to Lines channel, splitting longer lines +// if necessary. Return false if rate limit is reached. +func (tail *Tail) sendLine(line string) bool { + now := time.Now() + lines := []string{line} + + // Split longer lines + if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize { + lines = util.PartitionString(line, tail.MaxLineSize) + } + + for _, line := range lines { + tail.Lines <- &Line{line, now, nil} + } + + if tail.Config.RateLimiter != nil { + ok := tail.Config.RateLimiter.Pour(uint16(len(lines))) + if !ok { + tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n", + tail.Filename) + return false + } + } + + return true +} + +// Cleanup removes inotify watches added by the tail package. This function is +// meant to be invoked from a process's exit handler. Linux kernel may not +// automatically remove inotify watches after the process exits. +func (tail *Tail) Cleanup() { + watch.Cleanup(tail.Filename) +} diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go new file mode 100644 index 0000000000..bc4dc3357a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_posix.go @@ -0,0 +1,11 @@ +// +build linux darwin freebsd netbsd openbsd + +package tail + +import ( + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return os.Open(name) +} diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go new file mode 100644 index 0000000000..ef2cfca1b7 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/tail_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package tail + +import ( + "github.com/hpcloud/tail/winfile" + "os" +) + +func OpenFile(name string) (file *os.File, err error) { + return winfile.OpenFile(name, os.O_RDONLY, 0) +} diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go new file mode 100644 index 0000000000..54151fe39f --- /dev/null +++ b/vendor/github.com/hpcloud/tail/util/util.go @@ -0,0 +1,48 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. 
All rights reserved. + +package util + +import ( + "fmt" + "log" + "os" + "runtime/debug" +) + +type Logger struct { + *log.Logger +} + +var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)} + +// fatal is like panic except it displays only the current goroutine's stack. +func Fatal(format string, v ...interface{}) { + // https://github.com/hpcloud/log/blob/master/log.go#L45 + LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack())) + os.Exit(1) +} + +// partitionString partitions the string into chunks of given size, +// with the last chunk of variable size. +func PartitionString(s string, chunkSize int) []string { + if chunkSize <= 0 { + panic("invalid chunkSize") + } + length := len(s) + chunks := 1 + length/chunkSize + start := 0 + end := chunkSize + parts := make([]string, 0, chunks) + for { + if end > length { + end = length + } + parts = append(parts, s[start:end]) + if end == length { + break + } + start, end = end, end+chunkSize + } + return parts +} diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go new file mode 100644 index 0000000000..3ce5dcecbb --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/filechanges.go @@ -0,0 +1,36 @@ +package watch + +type FileChanges struct { + Modified chan bool // Channel to get notified of modifications + Truncated chan bool // Channel to get notified of truncations + Deleted chan bool // Channel to get notified of deletions/renames +} + +func NewFileChanges() *FileChanges { + return &FileChanges{ + make(chan bool), make(chan bool), make(chan bool)} +} + +func (fc *FileChanges) NotifyModified() { + sendOnlyIfEmpty(fc.Modified) +} + +func (fc *FileChanges) NotifyTruncated() { + sendOnlyIfEmpty(fc.Truncated) +} + +func (fc *FileChanges) NotifyDeleted() { + sendOnlyIfEmpty(fc.Deleted) +} + +// sendOnlyIfEmpty sends on a bool channel only if the channel has no +// backlog to be read by other goroutines. This concurrency pattern +// can be used to notify other goroutines if and only if they are +// looking for it (i.e., subsequent notifications can be compressed +// into one). +func sendOnlyIfEmpty(ch chan bool) { + select { + case ch <- true: + default: + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go new file mode 100644 index 0000000000..4478f1e1a0 --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify.go @@ -0,0 +1,128 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" + "gopkg.in/tomb.v1" +) + +// InotifyFileWatcher uses inotify to monitor file changes. +type InotifyFileWatcher struct { + Filename string + Size int64 +} + +func NewInotifyFileWatcher(filename string) *InotifyFileWatcher { + fw := &InotifyFileWatcher{filepath.Clean(filename), 0} + return fw +} + +func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + err := WatchCreate(fw.Filename) + if err != nil { + return err + } + defer RemoveWatchCreate(fw.Filename) + + // Do a real check now as the file might have been created before + // calling `WatchFlags` above. + if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) { + // file exists, or stat returned an error. 
+ return err + } + + events := Events(fw.Filename) + + for { + select { + case evt, ok := <-events: + if !ok { + return fmt.Errorf("inotify watcher has been closed") + } + evtName, err := filepath.Abs(evt.Name) + if err != nil { + return err + } + fwFilename, err := filepath.Abs(fw.Filename) + if err != nil { + return err + } + if evtName == fwFilename { + return nil + } + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + err := Watch(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + fw.Size = pos + + go func() { + defer RemoveWatch(fw.Filename) + + events := Events(fw.Filename) + + for { + prevSize := fw.Size + + var evt fsnotify.Event + var ok bool + + select { + case evt, ok = <-events: + if !ok { + return + } + case <-t.Dying(): + return + } + + switch { + case evt.Op&fsnotify.Remove == fsnotify.Remove: + fallthrough + + case evt.Op&fsnotify.Rename == fsnotify.Rename: + changes.NotifyDeleted() + return + + case evt.Op&fsnotify.Write == fsnotify.Write: + fi, err := os.Stat(fw.Filename) + if err != nil { + if os.IsNotExist(err) { + changes.NotifyDeleted() + return + } + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + fw.Size = fi.Size() + + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + } else { + changes.NotifyModified() + } + prevSize = fw.Size + } + } + }() + + return changes, nil +} diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go new file mode 100644 index 0000000000..03be4275ca --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go @@ -0,0 +1,260 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. 
+ +package watch + +import ( + "log" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/hpcloud/tail/util" + + "gopkg.in/fsnotify.v1" +) + +type InotifyTracker struct { + mux sync.Mutex + watcher *fsnotify.Watcher + chans map[string]chan fsnotify.Event + done map[string]chan bool + watchNums map[string]int + watch chan *watchInfo + remove chan *watchInfo + error chan error +} + +type watchInfo struct { + op fsnotify.Op + fname string +} + +func (this *watchInfo) isCreate() bool { + return this.op == fsnotify.Create +} + +var ( + // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used + shared *InotifyTracker + + // these are used to ensure the shared InotifyTracker is run exactly once + once = sync.Once{} + goRun = func() { + shared = &InotifyTracker{ + mux: sync.Mutex{}, + chans: make(map[string]chan fsnotify.Event), + done: make(map[string]chan bool), + watchNums: make(map[string]int), + watch: make(chan *watchInfo), + remove: make(chan *watchInfo), + error: make(chan error), + } + go shared.run() + } + + logger = log.New(os.Stderr, "", log.LstdFlags) +) + +// Watch signals the run goroutine to begin watching the input filename +func Watch(fname string) error { + return watch(&watchInfo{ + fname: fname, + }) +} + +// Watch create signals the run goroutine to begin watching the input filename +// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate +func WatchCreate(fname string) error { + return watch(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func watch(winfo *watchInfo) error { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.watch <- winfo + return <-shared.error +} + +// RemoveWatch signals the run goroutine to remove the watch for the input filename +func RemoveWatch(fname string) { + remove(&watchInfo{ + fname: fname, + }) +} + +// RemoveWatch create signals the run goroutine to remove the watch for the input filename +func RemoveWatchCreate(fname string) { + remove(&watchInfo{ + op: fsnotify.Create, + fname: fname, + }) +} + +func remove(winfo *watchInfo) { + // start running the shared InotifyTracker if not already running + once.Do(goRun) + + winfo.fname = filepath.Clean(winfo.fname) + shared.mux.Lock() + done := shared.done[winfo.fname] + if done != nil { + delete(shared.done, winfo.fname) + close(done) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + shared.watchNums[fname]-- + watchNum := shared.watchNums[fname] + if watchNum == 0 { + delete(shared.watchNums, fname) + } + shared.mux.Unlock() + + // If we were the last ones to watch this file, unsubscribe from inotify. + // This needs to happen after releasing the lock because fsnotify waits + // synchronously for the kernel to acknowledge the removal of the watch + // for this file, which causes us to deadlock if we still held the lock. + if watchNum == 0 { + shared.watcher.Remove(fname) + } + shared.remove <- winfo +} + +// Events returns a channel to which FileEvents corresponding to the input filename +// will be sent. This channel will be closed when removeWatch is called on this +// filename. +func Events(fname string) <-chan fsnotify.Event { + shared.mux.Lock() + defer shared.mux.Unlock() + + return shared.chans[fname] +} + +// Cleanup removes the watch for the input filename if necessary. 
+func Cleanup(fname string) { + RemoveWatch(fname) +} + +// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating +// a new Watcher if the previous Watcher was closed. +func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { + shared.mux.Lock() + defer shared.mux.Unlock() + + if shared.chans[winfo.fname] == nil { + shared.chans[winfo.fname] = make(chan fsnotify.Event) + shared.done[winfo.fname] = make(chan bool) + } + + fname := winfo.fname + if winfo.isCreate() { + // Watch for new files to be created in the parent directory. + fname = filepath.Dir(fname) + } + + // already in inotify watch + if shared.watchNums[fname] > 0 { + shared.watchNums[fname]++ + if winfo.isCreate() { + shared.watchNums[winfo.fname]++ + } + return nil + } + + err := shared.watcher.Add(fname) + if err == nil { + shared.watchNums[fname]++ + if winfo.isCreate() { + shared.watchNums[winfo.fname]++ + } + } + return err +} + +// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the +// corresponding events channel. +func (shared *InotifyTracker) removeWatch(winfo *watchInfo) { + shared.mux.Lock() + defer shared.mux.Unlock() + + ch := shared.chans[winfo.fname] + if ch == nil { + return + } + + delete(shared.chans, winfo.fname) + close(ch) + + if !winfo.isCreate() { + return + } + + shared.watchNums[winfo.fname]-- + if shared.watchNums[winfo.fname] == 0 { + delete(shared.watchNums, winfo.fname) + } +} + +// sendEvent sends the input event to the appropriate Tail. +func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { + name := filepath.Clean(event.Name) + + shared.mux.Lock() + ch := shared.chans[name] + done := shared.done[name] + shared.mux.Unlock() + + if ch != nil && done != nil { + select { + case ch <- event: + case <-done: + } + } +} + +// run starts the goroutine in which the shared struct reads events from its +// Watcher's Event channel and sends the events to the appropriate Tail. +func (shared *InotifyTracker) run() { + watcher, err := fsnotify.NewWatcher() + if err != nil { + util.Fatal("failed to create Watcher") + } + shared.watcher = watcher + + for { + select { + case winfo := <-shared.watch: + shared.error <- shared.addWatch(winfo) + + case winfo := <-shared.remove: + shared.removeWatch(winfo) + + case event, open := <-shared.watcher.Events: + if !open { + return + } + shared.sendEvent(event) + + case err, open := <-shared.watcher.Errors: + if !open { + return + } else if err != nil { + sysErr, ok := err.(*os.SyscallError) + if !ok || sysErr.Err != syscall.EINTR { + logger.Printf("Error in Watcher Error channel: %s", err) + } + } + } + } +} diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go new file mode 100644 index 0000000000..49491f21db --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/polling.go @@ -0,0 +1,118 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import ( + "os" + "runtime" + "time" + + "github.com/hpcloud/tail/util" + "gopkg.in/tomb.v1" +) + +// PollingFileWatcher polls the file for changes. 
+type PollingFileWatcher struct { + Filename string + Size int64 +} + +func NewPollingFileWatcher(filename string) *PollingFileWatcher { + fw := &PollingFileWatcher{filename, 0} + return fw +} + +var POLL_DURATION time.Duration + +func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error { + for { + if _, err := os.Stat(fw.Filename); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + select { + case <-time.After(POLL_DURATION): + continue + case <-t.Dying(): + return tomb.ErrDying + } + } + panic("unreachable") +} + +func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) { + origFi, err := os.Stat(fw.Filename) + if err != nil { + return nil, err + } + + changes := NewFileChanges() + var prevModTime time.Time + + // XXX: use tomb.Tomb to cleanly manage these goroutines. replace + // the fatal (below) with tomb's Kill. + + fw.Size = pos + + go func() { + prevSize := fw.Size + for { + select { + case <-t.Dying(): + return + default: + } + + time.Sleep(POLL_DURATION) + fi, err := os.Stat(fw.Filename) + if err != nil { + // Windows cannot delete a file if a handle is still open (tail keeps one open) + // so it gives access denied to anything trying to read it until all handles are released. + if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) { + // File does not exist (has been deleted). + changes.NotifyDeleted() + return + } + + // XXX: report this error back to the user + util.Fatal("Failed to stat file %v: %v", fw.Filename, err) + } + + // File got moved/renamed? + if !os.SameFile(origFi, fi) { + changes.NotifyDeleted() + return + } + + // File got truncated? + fw.Size = fi.Size() + if prevSize > 0 && prevSize > fw.Size { + changes.NotifyTruncated() + prevSize = fw.Size + continue + } + // File got bigger? + if prevSize > 0 && prevSize < fw.Size { + changes.NotifyModified() + prevSize = fw.Size + continue + } + prevSize = fw.Size + + // File was appended to (changed)? + modTime := fi.ModTime() + if modTime != prevModTime { + prevModTime = modTime + changes.NotifyModified() + } + } + }() + + return changes, nil +} + +func init() { + POLL_DURATION = 250 * time.Millisecond +} diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go new file mode 100644 index 0000000000..2e1783ef0a --- /dev/null +++ b/vendor/github.com/hpcloud/tail/watch/watch.go @@ -0,0 +1,20 @@ +// Copyright (c) 2015 HPE Software Inc. All rights reserved. +// Copyright (c) 2013 ActiveState Software Inc. All rights reserved. + +package watch + +import "gopkg.in/tomb.v1" + +// FileWatcher monitors file-level events. +type FileWatcher interface { + // BlockUntilExists blocks until the file comes into existence. + BlockUntilExists(*tomb.Tomb) error + + // ChangeEvents reports on changes to a file, be it modification, + // deletion, renames or truncations. Returned FileChanges group of + // channels will be closed, thus become unusable, after a deletion + // or truncation event. + // In order to properly report truncations, ChangeEvents requires + // the caller to pass their current offset in the file. 
+ ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error) +} diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go new file mode 100644 index 0000000000..aa7e7bc5df --- /dev/null +++ b/vendor/github.com/hpcloud/tail/winfile/winfile.go @@ -0,0 +1,92 @@ +// +build windows + +package winfile + +import ( + "os" + "syscall" + "unsafe" +) + +// issue also described here +//https://codereview.appspot.com/8203043/ + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218 +func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211 +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133 +func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) { + r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm)) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61 +func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= syscall.S_ISVTX + } + // No mapping for Go's ModeTemporary (plan9 only). + return +} diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 02fc81e062..8b76f1fbf3 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -13,7 +13,6 @@ It is ready for production use. 
[It is used in several projects by Docker, Googl [![Build Status][1]][2] [![Coverage Status][7]][8] [![Sourcegraph][9]][10] -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -28,7 +27,7 @@ It is ready for production use. [It is used in several projects by Docker, Googl ### Latest release -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). +[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6). ### Important note @@ -218,21 +217,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 3f5afa83a1..6ea38e636b 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -72,7 +72,6 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { - config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index f8de6c5430..44f70a89d9 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -26,10 +26,9 @@ func hasExportedField(dst reflect.Value) (exported bool) { } type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers - overwriteWithEmptyValue bool + Overwrite bool + AppendSlice bool + Transformers Transformers } type Transformers interface { @@ -41,8 +40,6 @@ type Transformers interface { // short circuiting on recursive types. 
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite - overwriteWithEmptySrc := config.overwriteWithEmptyValue - config.overwriteWithEmptyValue = false if !src.IsValid() { return @@ -77,7 +74,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } } else { - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } @@ -128,7 +125,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dstSlice = srcSlice } else if config.AppendSlice { if srcSlice.Type() != dstSlice.Type() { @@ -139,7 +136,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { + if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { continue } @@ -154,7 +151,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { @@ -194,7 +191,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co return } default: - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } diff --git a/vendor/github.com/joho/godotenv/.gitignore b/vendor/github.com/joho/godotenv/.gitignore deleted file mode 100644 index e43b0f9889..0000000000 --- a/vendor/github.com/joho/godotenv/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.DS_Store diff --git a/vendor/github.com/joho/godotenv/.travis.yml b/vendor/github.com/joho/godotenv/.travis.yml deleted file mode 100644 index f0db1adcdb..0000000000 --- a/vendor/github.com/joho/godotenv/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.x - -os: - - linux - - osx diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md deleted file mode 100644 index 4e8fcf2e9c..0000000000 --- a/vendor/github.com/joho/godotenv/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# GoDotEnv [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4?svg=true)](https://ci.appveyor.com/project/joho/godotenv) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) - -A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) - -From the original Library: - -> Storing configuration in the environment is one of the tenets of a twelve-factor app. 
Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. -> -> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. - -It can be used as a library (for loading in env for your own daemons etc) or as a bin command. - -There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. - -## Installation - -As a library - -```shell -go get github.com/joho/godotenv -``` - -or if you want to use it as a bin command -```shell -go get github.com/joho/godotenv/cmd/godotenv -``` - -## Usage - -Add your application configuration to your `.env` file in the root of your project: - -```shell -S3_BUCKET=YOURS3BUCKET -SECRET_KEY=YOURSECRETKEYGOESHERE -``` - -Then in your Go app you can do something like - -```go -package main - -import ( - "github.com/joho/godotenv" - "log" - "os" -) - -func main() { - err := godotenv.Load() - if err != nil { - log.Fatal("Error loading .env file") - } - - s3Bucket := os.Getenv("S3_BUCKET") - secretKey := os.Getenv("SECRET_KEY") - - // now do something with s3 or whatever -} -``` - -If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import - -```go -import _ "github.com/joho/godotenv/autoload" -``` - -While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit - -```go -_ = godotenv.Load("somerandomfile") -_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") -``` - -If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) - -```shell -# I am a comment and that is OK -SOME_VAR=someval -FOO=BAR # comments at line end are OK too -export BAR=BAZ -``` - -Or finally you can do YAML(ish) style - -```yaml -FOO: bar -BAR: baz -``` - -as a final aside, if you don't want godotenv munging your env you can just get a map back instead - -```go -var myEnv map[string]string -myEnv, err := godotenv.Read() - -s3Bucket := myEnv["S3_BUCKET"] -``` - -... or from an `io.Reader` instead of a local file - -```go -reader := getRemoteFile() -myEnv, err := godotenv.Parse(reader) -``` - -... or from a `string` if you so desire - -```go -content := getRemoteFileContent() -myEnv, err := godotenv.Unmarshal(content) -``` - -### Command Mode - -Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` - -``` -godotenv -f /some/path/to/.env some_command with some args -``` - -If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` - -### Writing Env Files - -Godotenv can also write a map representing the environment to a correctly-formatted and escaped file - -```go -env, err := godotenv.Unmarshal("KEY=value") -err := godotenv.Write(env, "./.env") -``` - -... or to a string - -```go -env, err := godotenv.Unmarshal("KEY=value") -content, err := godotenv.Marshal(env) -``` - -## Contributing - -Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. - -*code changes without tests will not be accepted* - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. 
Commit your changes (`git commit -am 'Added some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releases - -Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. - -Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` - -## CI - -Linux: [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) - -## Who? - -The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go deleted file mode 100644 index 29b436c77c..0000000000 --- a/vendor/github.com/joho/godotenv/godotenv.go +++ /dev/null @@ -1,346 +0,0 @@ -// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) -// -// Examples/readme can be found on the github page at https://github.com/joho/godotenv -// -// The TL;DR is that you make a .env file that looks something like -// -// SOME_ENV_VAR=somevalue -// -// and then in your go code you can call -// -// godotenv.Load() -// -// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") -package godotenv - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/exec" - "regexp" - "sort" - "strings" -) - -const doubleQuoteSpecialChars = "\\\n\r\"!$`" - -// Load will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Load without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Load("fileone", "filetwo") -// -// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults -func Load(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, false) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Overload without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
-func Overload(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, true) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Read all env (with same file loading semantics as Load) but return values as -// a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { - filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) - - for _, filename := range filenames { - individualEnvMap, individualErr := readFile(filename) - - if individualErr != nil { - err = individualErr - return // return early on a spazout - } - - for key, value := range individualEnvMap { - envMap[key] = value - } - } - - return -} - -// Parse reads an env file from io.Reader, returning a map of keys and values. -func Parse(r io.Reader) (envMap map[string]string, err error) { - envMap = make(map[string]string) - - var lines []string - scanner := bufio.NewScanner(r) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - - if err = scanner.Err(); err != nil { - return - } - - for _, fullLine := range lines { - if !isIgnoredLine(fullLine) { - var key, value string - key, value, err = parseLine(fullLine, envMap) - - if err != nil { - return - } - envMap[key] = value - } - } - return -} - -//Unmarshal reads an env file from a string, returning a map of keys and values. -func Unmarshal(str string) (envMap map[string]string, err error) { - return Parse(strings.NewReader(str)) -} - -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run() -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string) error { - Load(filenames...) - - command := exec.Command(cmd, cmdArgs...) - command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -// Write serializes the given environment and writes it to a file -func Write(envMap map[string]string, filename string) error { - content, error := Marshal(envMap) - if error != nil { - return error - } - file, error := os.Create(filename) - if error != nil { - return error - } - _, err := file.WriteString(content) - return err -} - -// Marshal outputs the given environment as a dotenv-formatted environment file. -// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
-func Marshal(envMap map[string]string) (string, error) { - lines := make([]string, 0, len(envMap)) - for k, v := range envMap { - lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) - } - sort.Strings(lines) - return strings.Join(lines, "\n"), nil -} - -func filenamesOrDefault(filenames []string) []string { - if len(filenames) == 0 { - return []string{".env"} - } - return filenames -} - -func loadFile(filename string, overload bool) error { - envMap, err := readFile(filename) - if err != nil { - return err - } - - currentEnv := map[string]bool{} - rawEnv := os.Environ() - for _, rawEnvLine := range rawEnv { - key := strings.Split(rawEnvLine, "=")[0] - currentEnv[key] = true - } - - for key, value := range envMap { - if !currentEnv[key] || overload { - os.Setenv(key, value) - } - } - - return nil -} - -func readFile(filename string) (envMap map[string]string, err error) { - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - - return Parse(file) -} - -func parseLine(line string, envMap map[string]string) (key string, value string, err error) { - if len(line) == 0 { - err = errors.New("zero length string") - return - } - - // ditch the comments (but keep quoted hashes) - if strings.Contains(line, "#") { - segmentsBetweenHashes := strings.Split(line, "#") - quotesAreOpen := false - var segmentsToKeep []string - for _, segment := range segmentsBetweenHashes { - if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { - if quotesAreOpen { - quotesAreOpen = false - segmentsToKeep = append(segmentsToKeep, segment) - } else { - quotesAreOpen = true - } - } - - if len(segmentsToKeep) == 0 || quotesAreOpen { - segmentsToKeep = append(segmentsToKeep, segment) - } - } - - line = strings.Join(segmentsToKeep, "#") - } - - firstEquals := strings.Index(line, "=") - firstColon := strings.Index(line, ":") - splitString := strings.SplitN(line, "=", 2) - if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { - //this is a yaml-style line - splitString = strings.SplitN(line, ":", 2) - } - - if len(splitString) != 2 { - err = errors.New("Can't separate key from value") - return - } - - // Parse the key - key = splitString[0] - if strings.HasPrefix(key, "export") { - key = strings.TrimPrefix(key, "export") - } - key = strings.Trim(key, " ") - - // Parse the value - value = parseValue(splitString[1], envMap) - return -} - -func parseValue(value string, envMap map[string]string) string { - - // trim - value = strings.Trim(value, " ") - - // check if we've got quoted values or possible escapes - if len(value) > 1 { - rs := regexp.MustCompile(`\A'(.*)'\z`) - singleQuotes := rs.FindStringSubmatch(value) - - rd := regexp.MustCompile(`\A"(.*)"\z`) - doubleQuotes := rd.FindStringSubmatch(value) - - if singleQuotes != nil || doubleQuotes != nil { - // pull the quotes off the edges - value = value[1 : len(value)-1] - } - - if doubleQuotes != nil { - // expand newlines - escapeRegex := regexp.MustCompile(`\\.`) - value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: - return match - } - }) - // unescape characters - e := regexp.MustCompile(`\\([^$])`) - value = e.ReplaceAllString(value, "$1") - } - - if singleQuotes == nil { - value = expandVariables(value, envMap) - } - } - - return value -} - -func expandVariables(v string, m map[string]string) string { - r := 
regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) - - return r.ReplaceAllStringFunc(v, func(s string) string { - submatch := r.FindStringSubmatch(s) - - if submatch == nil { - return s - } - if submatch[1] == "\\" || submatch[2] == "(" { - return submatch[0][1:] - } else if submatch[4] != "" { - return m[submatch[4]] - } - return s - }) -} - -func isIgnoredLine(line string) bool { - trimmedLine := strings.Trim(line, " \n\t") - return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") -} - -func doubleQuoteEscape(line string) string { - for _, c := range doubleQuoteSpecialChars { - toReplace := "\\" + string(c) - if c == '\n' { - toReplace = `\n` - } - if c == '\r' { - toReplace = `\r` - } - line = strings.Replace(line, string(c), toReplace, -1) - } - return line -} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 0000000000..98db8f060b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 0000000000..56729a92ca --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) 
+This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 0000000000..1f28d773d7 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable return new instance of Writer which handle escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 0000000000..887f203dc7 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,30 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable return new instance of Writer which handle escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 0000000000..404e10ca02 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,980 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provide colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer +} + +// NewColorable return new instance of Writer which handle escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 
209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// Write write data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = 
strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + 
procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. 
+ if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, 
+ {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 0000000000..ef3ca9d4c3 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.8 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 0000000000..2c12960ec7 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 0000000000..9721e16f4b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable hold writer but remove escape sequence. 
+type NonColorable struct { + out io.Writer +} + +// NewNonColorable return new instance of Writer which remove escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write write data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 0000000000..5597e026dd --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/gobuffalo/envy/LICENSE.txt b/vendor/github.com/mattn/go-isatty/LICENSE similarity index 93% rename from vendor/github.com/gobuffalo/envy/LICENSE.txt rename to vendor/github.com/mattn/go-isatty/LICENSE index 123ddc0d80..65dc692b6b 100644 --- a/vendor/github.com/gobuffalo/envy/LICENSE.txt +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -1,5 +1,6 @@ -The MIT License (MIT) -Copyright (c) 2018 Mark Bates +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 0000000000..1e69004bb0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for 
IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 0000000000..17d4f90ebc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 0000000000..f310320c33 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 0000000000..426c8973c0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 0000000000..d3567cb5bf --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 0000000000..07e93039db --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 0000000000..ff714a3761 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 0000000000..bdd5c79a07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 0000000000..453b025d0d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 0000000000..af51cbcaa4 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. 
+func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore new file mode 100644 index 0000000000..b9f9659d29 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +TODO +tmp/**/* +*.coverprofile +.vscode +.idea/ +*.log diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml new file mode 100644 index 0000000000..72e8ccf0be --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +install: + - go get -v -t ./... + - go get golang.org/x/tools/cmd/cover + - go get github.com/onsi/gomega + - go install github.com/onsi/ginkgo/ginkgo + - export PATH=$PATH:$HOME/gopath/bin + +script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md new file mode 100644 index 0000000000..4920406aeb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -0,0 +1,218 @@ +## 1.8.0 + +### New Features +- allow config of the vet flag for `go test` (#562) [3cd45fa] +- Support projects using go modules [d56ee76] + +### Fixes and Minor Improvements +- chore(godoc): fixes typos in Measurement funcs [dbaca8e] +- Optimize focus to avoid allocations [f493786] +- Ensure generated test file names are underscored [505cc35] + +## 1.7.0 + +### New Features +- Add JustAfterEach (#484) [0d4f080] + +### Fixes +- Correctly round suite time in junit reporter [2445fc1] +- Avoid using -i argument to go test for Golang 1.10+ [46bbc26] + +## 1.6.0 + +### New Features +- add --debug flag to emit node output to files (#499) [39febac] + +### Fixes +- fix: for `go vet` to pass [69338ec] +- docs: fix for contributing instructions [7004cb1] +- consolidate and streamline contribution docs (#494) [d848015] +- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6] +- all: gofmt [000d317] +- Increase eventually timeout to 30s [c73579c] +- Clarify asynchronous test behaviour [294d8f4] +- Travis badge should only show master [26d2143] + +## 1.5.0 5/10/2018 + +### New Features +- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8] +- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8] +- Re-add noisySkippings flag [652e15c] +- Allow coverage to be displayed for focused specs (#367) [11459a8] +- Handle -outputdir flag (#364) [228e3a8] +- Handle -coverprofile flag (#355) [43392d5] + +### Fixes +- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. 
(#365) [8382b23] +- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0] +- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd] +- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98] +- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c] +- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b] +- Add an extra new line after reporting spec run completion for test2json [874520d] +- added name name field to junit reported testsuite [ae61c63] +- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856] +- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe] +- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d] +- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed] +- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8] +- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598] +- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa] +- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8] +- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9] +- Replace GOPATH in Environment [4b883f0] + + +## 1.4.0 7/16/2017 + +- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345) +- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357] +- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356] +- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277] +- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344] +- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248] + +## 1.3.0 3/28/2017 + +Improvements: + +- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency. +- `Skip(message)` can be used to skip the current test. 
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests) +- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed` +- Support for retrying flaky tests with `--flakeAttempts` +- `ginkgo ./...` now recurses as you'd expect +- Added `Specify` a synonym for `It` +- Support colorise on Windows +- Broader support for various go compilation flags in the `ginkgo` CLI + +Bug Fixes: + +- Ginkgo tests now fail when you `panic(nil)` (#167) + +## 1.2.0 5/31/2015 + +Improvements + +- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160) +- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152) +- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166) + +## 1.2.0-beta + +Ginkgo now requires Go 1.4+ + +Improvements: + +- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does. +- Improved focus behavior. Now, this: + + ```golang + FDescribe("Some describe", func() { + It("A", func() {}) + + FIt("B", func() {}) + }) + ``` + + will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests. +- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests. +- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs. +- Improved output when an error occurs in a setup or teardown block. +- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order. +- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`. +- Add support for precompiled tests: + - `ginkgo build ` will now compile the package, producing a file named `package.test` + - The compiled `package.test` file can be run directly. This runs the tests in series. + - To run precompiled tests in parallel, you can run: `ginkgo -p package.test` +- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs. +- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory +- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump. +- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+. +- `ginkgo -notify` now works on Linux + +Bug Fixes: + +- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0. +- Fix tempfile leak when running in parallel +- Fix incorrect failure message when a panic occurs during a parallel test run +- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests. +- Be more consistent about handling SIGTERM as well as SIGINT +- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts. +- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!) 
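(Editorial note for reviewers of this vendored dependency — not part of the upstream CHANGELOG or of this patch. The entries above mention DSL pieces such as `Skip(message)`, `By`, and focused specs; a minimal, hypothetical spec file showing how those pieces fit together could look roughly like the sketch below. The package, suite, and test names are invented for illustration.)

```go
package cache_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Suite bootstrap; `ginkgo bootstrap` generates an equivalent file.
func TestCache(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cache Suite")
}

var _ = Describe("Cache", func() {
	It("stores and retrieves a value", func() {
		By("writing the value") // By() documents long Its via the GinkgoWriter
		store := map[string]string{"k": "v"}

		By("reading it back")
		Expect(store["k"]).To(Equal("v"))
	})

	It("talks to an external service", func() {
		// Skip() marks the current spec as skipped at runtime.
		Skip("no endpoint available in this environment")
	})
})
```

Running `ginkgo` (or plain `go test`) in such a package would execute the specs; narrowing the run with `--focus` or an `FIt` behaves as described in the 1.2.0-beta focus notes above.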
+ +## 1.1.0 (8/2/2014) + +No changes, just dropping the beta. + +## 1.1.0-beta (7/22/2014) +New Features: + +- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag. +- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites. +- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes. +- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command. +- `ginkgo --failFast` aborts the test suite after the first failure. +- `ginkgo generate file_1 file_2` can take multiple file arguments. +- Ginkgo now summarizes any spec failures that occured at the end of the test run. +- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed. + +Improvements: + +- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped. +- `ginkgo --untilItFails` no longer recompiles between attempts. +- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed. + +Bug Fixes: + +- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`. +- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic + +## 1.0.0 (5/24/2014) +New Features: + +- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode` + +Improvements: + +- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor. +- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified. + +Bug Fixes: + +- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s. +- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail. +- Fix all remaining race conditions in Ginkgo's test suite. + +## 1.0.0-beta (4/14/2014) +Breaking changes: + +- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead +- Modified the Reporter interface +- `watch` is now a subcommand, not a flag. + +DSL changes: + +- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites. +- `AfterSuite` is triggered on interrupt (`^C`) as well as exit. +- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes. + +CLI changes: + +- `watch` is now a subcommand, not a flag +- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot` +- Additional arguments can be passed to specs. Pass them after the `--` separator +- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp. +- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs. 
+ +Misc: + +- Start using semantic versioning +- Start maintaining changelog + +Major refactor: + +- Pull out Ginkgo's internal to `internal` +- Rename `example` everywhere to `spec` +- Much more! diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md new file mode 100644 index 0000000000..908b95c2c1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md @@ -0,0 +1,33 @@ +# Contributing to Ginkgo + +Your contributions to Ginkgo are essential for its long-term maintenance and improvement. + +- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code! +- Ensure adequate test coverage: + - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). + - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. +- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch. + If relevant, please submit a docs PR to that branch alongside your code PR. + +Thanks for supporting Ginkgo! + +## Setup + +Fork the repo, then: + +``` +go get github.com/onsi/ginkgo +go get github.com/onsi/gomega/... +cd $GOPATH/src/github.com/onsi/ginkgo +git remote add fork git@github.com:/ginkgo.git + +ginkgo -r -p # ensure tests are green +go vet ./... # ensure linter is happy +``` + +## Making the PR + - go to a new branch `git checkout -b my-feature` + - make your changes + - run tests and linter again (see above) + - `git push fork` + - open PR 🎉 diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/onsi/ginkgo/LICENSE similarity index 95% rename from vendor/github.com/joho/godotenv/LICENCE rename to vendor/github.com/onsi/ginkgo/LICENSE index e7ddd51be9..9415ee72c1 100644 --- a/vendor/github.com/joho/godotenv/LICENCE +++ b/vendor/github.com/onsi/ginkgo/LICENSE @@ -1,6 +1,4 @@ -Copyright (c) 2013 John Barton - -MIT License +Copyright (c) 2013-2014 Onsi Fakhouri Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the @@ -20,4 +18,3 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md new file mode 100644 index 0000000000..cdf8d054a1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/README.md @@ -0,0 +1,121 @@ +![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) + +[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo) + +Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! + +If you have a question, comment, bug report, feature request, etc. please open a GitHub issue. + +## Feature List + +- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. 
It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite) + +- Structure your BDD-style tests expressively: + - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context) + - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown + - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions + - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern). + - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite. + +- A comprehensive test runner that lets you: + - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs) + - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line + - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order. + - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs) + +- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples: + - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime + - `ginkgo -cover` runs your tests using Go's code coverage tool + - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package + - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression + - `ginkgo -r` runs all tests suites under the current directory + - `ginkgo -v` prints out identifying information for each tests just before it runs + + And much more: run `ginkgo help` for details! + + The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test` + +- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop! + +- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests) + +- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code. + +- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. + +- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. + +- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). 
Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details. + +- A modular architecture that lets you easily: + - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter). + - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo + +## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library + +Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/) + +## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework + +Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org) + +## Set Me Up! + +You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed. + +```bash + +go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI +go get -u github.com/onsi/gomega/... # fetches the matcher library + +cd path/to/package/you/want/to/test + +ginkgo bootstrap # set up a new ginkgo suite +ginkgo generate # will create a sample test file. edit this file and add your tests then... + +go test # to run your tests + +ginkgo # also runs your tests + +``` + +## I'm new to Go: What are my testing options? + +Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set. + +With that said, it's great to know what your options are :) + +### What Go gives you out of the box + +Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library. + +### Matcher libraries for Go's XUnit style tests + +A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction: + +- [testify](https://github.com/stretchr/testify) +- [gocheck](http://labix.org/gocheck) + +You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests) + +### BDD style testing frameworks + +There are a handful of BDD-style testing frameworks written for Go. Here are a few: + +- [Ginkgo](https://github.com/onsi/ginkgo) ;) +- [GoConvey](https://github.com/smartystreets/goconvey) +- [Goblin](https://github.com/franela/goblin) +- [Mao](https://github.com/azer/mao) +- [Zen](https://github.com/pranavraja/zen) + +Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries. + +Go explore! + +## License + +Ginkgo is MIT-Licensed + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/RELEASING.md new file mode 100644 index 0000000000..1e298c2da7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/RELEASING.md @@ -0,0 +1,14 @@ +A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: + +1. Ensure CHANGELOG.md is up to date. 
+ - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Categorize the changes into + - Breaking Changes (requires a major version) + - New Features (minor version) + - Fixes (fix version) + - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) +1. Update `VERSION` in `config/config.go` +1. Create a commit with the version number as the commit message (e.g. `v1.3.0`) +1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`) +1. Push the commit and tag to GitHub +1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go new file mode 100644 index 0000000000..dab2a24708 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/config/config.go @@ -0,0 +1,200 @@ +/* +Ginkgo accepts a number of configuration options. + +These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli) + +You can also learn more via + + ginkgo help + +or (I kid you not): + + go test -asdf +*/ +package config + +import ( + "flag" + "time" + + "fmt" +) + +const VERSION = "1.8.0" + +type GinkgoConfigType struct { + RandomSeed int64 + RandomizeAllSpecs bool + RegexScansFilePath bool + FocusString string + SkipString string + SkipMeasurements bool + FailOnPending bool + FailFast bool + FlakeAttempts int + EmitSpecProgress bool + DryRun bool + DebugParallel bool + + ParallelNode int + ParallelTotal int + SyncHost string + StreamHost string +} + +var GinkgoConfig = GinkgoConfigType{} + +type DefaultReporterConfigType struct { + NoColor bool + SlowSpecThreshold float64 + NoisyPendings bool + NoisySkippings bool + Succinct bool + Verbose bool + FullTrace bool +} + +var DefaultReporterConfig = DefaultReporterConfigType{} + +func processPrefix(prefix string) string { + if prefix != "" { + prefix = prefix + "." + } + return prefix +} + +func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { + prefix = processPrefix(prefix) + flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") + flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.") + flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") + flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") + flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") + + flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. 
Best paired with -v.") + + flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.") + flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.") + + flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") + + flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.") + + flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") + + flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.") + + if includeParallelFlags { + flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.") + flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.") + flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") + flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") + } + + flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") + flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") + flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") + flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.") + flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") + flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") + flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") +} + +func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { + prefix = processPrefix(prefix) + result := make([]string, 0) + + if ginkgo.RandomSeed > 0 { + result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) + } + + if ginkgo.RandomizeAllSpecs { + result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) + } + + if ginkgo.SkipMeasurements { + result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) + } + + if ginkgo.FailOnPending { + result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) + } + + if ginkgo.FailFast { + result = append(result, fmt.Sprintf("--%sfailFast", prefix)) + } + + if ginkgo.DryRun { + result = append(result, fmt.Sprintf("--%sdryRun", 
prefix)) + } + + if ginkgo.FocusString != "" { + result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString)) + } + + if ginkgo.SkipString != "" { + result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString)) + } + + if ginkgo.FlakeAttempts > 1 { + result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts)) + } + + if ginkgo.EmitSpecProgress { + result = append(result, fmt.Sprintf("--%sprogress", prefix)) + } + + if ginkgo.DebugParallel { + result = append(result, fmt.Sprintf("--%sdebug", prefix)) + } + + if ginkgo.ParallelNode != 0 { + result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) + } + + if ginkgo.ParallelTotal != 0 { + result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) + } + + if ginkgo.StreamHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) + } + + if ginkgo.SyncHost != "" { + result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost)) + } + + if ginkgo.RegexScansFilePath { + result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix)) + } + + if reporter.NoColor { + result = append(result, fmt.Sprintf("--%snoColor", prefix)) + } + + if reporter.SlowSpecThreshold > 0 { + result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) + } + + if !reporter.NoisyPendings { + result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) + } + + if !reporter.NoisySkippings { + result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix)) + } + + if reporter.Verbose { + result = append(result, fmt.Sprintf("--%sv", prefix)) + } + + if reporter.Succinct { + result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) + } + + if reporter.FullTrace { + result = append(result, fmt.Sprintf("--%strace", prefix)) + } + + return result +} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go new file mode 100644 index 0000000000..a6b96d88fd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -0,0 +1,619 @@ +/* +Ginkgo is a BDD-style testing framework for Golang + +The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/ + +Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega) + +Ginkgo on Github: http://github.com/onsi/ginkgo + +Ginkgo is MIT-Licensed +*/ +package ginkgo + +import ( + "flag" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/remote" + "github.com/onsi/ginkgo/internal/suite" + "github.com/onsi/ginkgo/internal/testingtproxy" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" + "github.com/onsi/ginkgo/types" +) + +const GINKGO_VERSION = config.VERSION +const GINKGO_PANIC = ` +Your test failed. +Ginkgo panics to prevent subsequent assertions from running. +Normally Ginkgo rescues this panic so you shouldn't see it. + +But, if you make an assertion in a goroutine, Ginkgo can't capture the panic. 
+To circumvent this, you should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. +` +const defaultTimeout = 1 + +var globalSuite *suite.Suite +var globalFailer *failer.Failer + +func init() { + config.Flags(flag.CommandLine, "ginkgo", true) + GinkgoWriter = writer.New(os.Stdout) + globalFailer = failer.New() + globalSuite = suite.New(globalFailer) +} + +//GinkgoWriter implements an io.Writer +//When running in verbose mode any writes to GinkgoWriter will be immediately printed +//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen +//only if the current test fails. +var GinkgoWriter io.Writer + +//The interface by which Ginkgo receives *testing.T +type GinkgoTestingT interface { + Fail() +} + +//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is +//useful for seeding your own pseudorandom number generators (PRNGs) to ensure +//consistent executions from run to run, where your tests contain variability (for +//example, when selecting random test data). +func GinkgoRandomSeed() int64 { + return config.GinkgoConfig.RandomSeed +} + +//GinkgoParallelNode returns the parallel node number for the current ginkgo process +//The node number is 1-indexed +func GinkgoParallelNode() int { + return config.GinkgoConfig.ParallelNode +} + +//Some matcher libraries or legacy codebases require a *testing.T +//GinkgoT implements an interface analogous to *testing.T and can be used if +//the library in question accepts *testing.T through an interface +// +// For example, with testify: +// assert.Equal(GinkgoT(), 123, 123, "they should be equal") +// +// Or with gomock: +// gomock.NewController(GinkgoT()) +// +// GinkgoT() takes an optional offset argument that can be used to get the +// correct line number associated with the failure. +func GinkgoT(optionalOffset ...int) GinkgoTInterface { + offset := 3 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return testingtproxy.New(GinkgoWriter, Fail, offset) +} + +//The interface returned by GinkgoT(). This covers most of the methods +//in the testing package's T. +type GinkgoTInterface interface { + Fail() + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Failed() bool + Parallel() + Skip(args ...interface{}) + Skipf(format string, args ...interface{}) + SkipNow() + Skipped() bool +} + +//Custom Ginkgo test reporters must implement the Reporter interface. +// +//The custom reporter is passed in a SuiteSummary when the suite begins and ends, +//and a SpecSummary just before a spec begins and just after a spec ends +type Reporter reporters.Reporter + +//Asynchronous specs are given a channel of the Done type. You must close or write to the channel +//to tell Ginkgo that your async test is done. 
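+//
+//For example, a minimal sketch of an asynchronous spec (DoSomethingSlow is a hypothetical helper, and Expect/ContainSubstring come from Gomega):
+//
+//    It("should post to the channel, eventually", func(done Done) {
+//        c := make(chan string)
+//        go DoSomethingSlow(c)
+//        Expect(<-c).To(ContainSubstring("done"))
+//        close(done)
+//    }, 0.2)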
+type Done chan<- interface{}
+
+//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
+// FullTestText: a concatenation of ComponentTexts and the TestText
+// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
+// TestText: the text in the actual It or Measure node
+// IsMeasurement: true if the current test is a measurement
+// FileName: the name of the file containing the current test
+// LineNumber: the line number for the current test
+// Failed: if the current test has failed, this will be true (useful in an AfterEach)
+type GinkgoTestDescription struct {
+ FullTestText string
+ ComponentTexts []string
+ TestText string
+
+ IsMeasurement bool
+
+ FileName string
+ LineNumber int
+
+ Failed bool
+ Duration time.Duration
+}
+
+//CurrentGinkgoTestDescription returns information about the current running test.
+func CurrentGinkgoTestDescription() GinkgoTestDescription {
+ summary, ok := globalSuite.CurrentRunningSpecSummary()
+ if !ok {
+ return GinkgoTestDescription{}
+ }
+
+ subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
+
+ return GinkgoTestDescription{
+ ComponentTexts: summary.ComponentTexts[1:],
+ FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
+ TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
+ IsMeasurement: summary.IsMeasurement,
+ FileName: subjectCodeLocation.FileName,
+ LineNumber: subjectCodeLocation.LineNumber,
+ Failed: summary.HasFailureState(),
+ Duration: summary.RunTime,
+ }
+}
+
+//Measurement tests receive a Benchmarker.
+//
+//You use the Time() function to time how long the passed in body function takes to run
+//You use the RecordValue() function to track arbitrary numerical measurements.
+//The RecordValueWithPrecision() function can be used alternatively to provide the unit
+//and resolution of the numeric measurement.
+//The optional info argument is passed to the test reporter and can be used to
+// provide the measurement data to a custom reporter with context.
+//
+//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
+type Benchmarker interface {
+ Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...interface{})
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+}
+
+//RunSpecs is the entry point for the Ginkgo test runner.
+//You must call this within a Golang testing TestX(t *testing.T) function.
+//
+//To bootstrap a test suite you can use the Ginkgo CLI:
+//
+// ginkgo bootstrap
+func RunSpecs(t GinkgoTestingT, description string) bool {
+ specReporters := []Reporter{buildDefaultReporter()}
+ return RunSpecsWithCustomReporters(t, description, specReporters)
+}
+
+//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
+//RunSpecs() with this method.
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+ specReporters = append(specReporters, buildDefaultReporter())
+ return RunSpecsWithCustomReporters(t, description, specReporters)
+}
+
+//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
+//RunSpecs() with this method.
Note that parallel tests will not work correctly without the default reporter
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
+ writer := GinkgoWriter.(*writer.Writer)
+ writer.SetStream(config.DefaultReporterConfig.Verbose)
+ reporters := make([]reporters.Reporter, len(specReporters))
+ for i, reporter := range specReporters {
+ reporters[i] = reporter
+ }
+ passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
+ if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Println("PASS | FOCUSED")
+ os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+ }
+ return passed
+}
+
+func buildDefaultReporter() Reporter {
+ remoteReportingServer := config.GinkgoConfig.StreamHost
+ if remoteReportingServer == "" {
+ stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
+ return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
+ } else {
+ debugFile := ""
+ if config.GinkgoConfig.DebugParallel {
+ debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
+ }
+ return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
+ }
+}
+
+//Skip notifies Ginkgo that the current spec was skipped.
+func Skip(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ globalFailer.Skip(message, codelocation.New(skip+1))
+ panic(GINKGO_PANIC)
+}
+
+//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+func Fail(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ globalFailer.Fail(message, codelocation.New(skip+1))
+ panic(GINKGO_PANIC)
+}
+
+//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+//calls out to Gomega
+//
+//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+//further assertions from running. This panic must be recovered. Ginkgo does this for you
+//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
+//
+//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
+//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+func GinkgoRecover() {
+ e := recover()
+ if e != nil {
+ globalFailer.Panic(codelocation.New(1), e)
+ }
+}
+
+//Describe blocks allow you to organize your specs. A Describe block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts and Whens.
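+//
+//For example, a minimal sketch of the nesting described above (Book and CategoryByLength are hypothetical, and Expect/Equal come from Gomega):
+//
+//    var _ = Describe("Book", func() {
+//        var book Book
+//
+//        BeforeEach(func() {
+//            book = Book{Title: "Les Miserables", Pages: 1488}
+//        })
+//
+//        Context("when the book has more than 300 pages", func() {
+//            It("should be a novel", func() {
+//                Expect(book.CategoryByLength()).To(Equal("NOVEL"))
+//            })
+//        })
+//    })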
+func Describe(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+ return true
+}
+
+//You can focus the tests within a describe block using FDescribe
+func FDescribe(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+ return true
+}
+
+//You can mark the tests within a describe block as pending using PDescribe
+func PDescribe(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+ return true
+}
+
+//You can mark the tests within a describe block as pending using XDescribe
+func XDescribe(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+ return true
+}
+
+//Context blocks allow you to organize your specs. A Context block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts and Whens.
+func Context(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
+ return true
+}
+
+//You can focus the tests within a describe block using FContext
+func FContext(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
+ return true
+}
+
+//You can mark the tests within a describe block as pending using PContext
+func PContext(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+ return true
+}
+
+//You can mark the tests within a describe block as pending using XContext
+func XContext(text string, body func()) bool {
+ globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
+ return true
+}
+
+//When blocks allow you to organize your specs. A When block can contain any number of
+//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
+//
+//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
+//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
+//or method and, within that Describe, outline a number of Contexts and Whens.
+func When(text string, body func()) bool {
+ globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
+ return true
+}
+
+//You can focus the tests within a describe block using FWhen
+func FWhen(text string, body func()) bool {
+ globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
+ return true
+}
+
+//You can mark the tests within a describe block as pending using PWhen
+func PWhen(text string, body func()) bool {
+ globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
+ return true
+}
+
+//You can mark the tests within a describe block as pending using XWhen
+func XWhen(text string, body func()) bool {
+ globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
+ return true
+}
+
+//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
+//within an It block.
+// +//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a +//function that accepts a Done channel. When you do this, you can also provide an optional timeout. +func It(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can focus individual Its using FIt +func FIt(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can mark Its as pending using PIt +func PIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Its as pending using XIt +func XIt(text string, _ ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//Specify blocks are aliases for It blocks and allow for more natural wording in situations +//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks +//which apply to It blocks. +func Specify(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can focus individual Specifys using FSpecify +func FSpecify(text string, body interface{}, timeout ...float64) bool { + globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//You can mark Specifys as pending using PSpecify +func PSpecify(text string, is ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//You can mark Specifys as pending using XSpecify +func XSpecify(text string, is ...interface{}) bool { + globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0) + return true +} + +//By allows you to better document large Its. +// +//Generally you should try to keep your Its short and to the point. This is not always possible, however, +//especially in the context of integration tests that capture a particular workflow. +// +//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...) +//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function. +func By(text string, callbacks ...func()) { + preamble := "\x1b[1mSTEP\x1b[0m" + if config.DefaultReporterConfig.NoColor { + preamble = "STEP" + } + fmt.Fprintln(GinkgoWriter, preamble+": "+text) + if len(callbacks) == 1 { + callbacks[0]() + } + if len(callbacks) > 1 { + panic("just one callback per By, please") + } +} + +//Measure blocks run the passed in body function repeatedly (determined by the samples argument) +//and accumulate metrics provided to the Benchmarker by the body function. 
+//
+//The body function must have the signature:
+// func(b Benchmarker)
+func Measure(text string, body interface{}, samples int) bool {
+ globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
+ return true
+}
+
+//You can focus individual Measures using FMeasure
+func FMeasure(text string, body interface{}, samples int) bool {
+ globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
+ return true
+}
+
+//You can mark Measurements as pending using PMeasure
+func PMeasure(text string, _ ...interface{}) bool {
+ globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+ return true
+}
+
+//You can mark Measurements as pending using XMeasure
+func XMeasure(text string, _ ...interface{}) bool {
+ globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
+ return true
+}
+
+//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
+//parallel node process will call BeforeSuite.
+//
+//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+func BeforeSuite(body interface{}, timeout ...float64) bool {
+ globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+ return true
+}
+
+//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
+//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
+//
+//When running in parallel, each parallel node process will call AfterSuite.
+//
+//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
+//
+//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+func AfterSuite(body interface{}, timeout ...float64) bool {
+ globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
+ return true
+}
+
+//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
+//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
+//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
+//until that node is done before running.
+//
+//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
+//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
+//to the second function (on all the other nodes).
+//
+//The functions have the following signatures.
The first function (which only runs on node 1) has the signature: +// +// func() []byte +// +//or, to run asynchronously: +// +// func(done Done) []byte +// +//The byte array returned by the first function is then passed to the second function, which has the signature: +// +// func(data []byte) +// +//or, to run asynchronously: +// +// func(data []byte, done Done) +// +//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes: +// +// var dbClient db.Client +// var dbRunner db.Runner +// +// var _ = SynchronizedBeforeSuite(func() []byte { +// dbRunner = db.NewRunner() +// err := dbRunner.Start() +// Ω(err).ShouldNot(HaveOccurred()) +// return []byte(dbRunner.URL) +// }, func(data []byte) { +// dbClient = db.NewClient() +// err := dbClient.Connect(string(data)) +// Ω(err).ShouldNot(HaveOccurred()) +// }) +func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedBeforeSuiteNode( + node1Body, + allNodesBody, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up +//external singleton resources shared across nodes when running tests in parallel. +// +//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1 +//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until +//all other nodes are finished. +// +//Both functions have the same signature: either func() or func(done Done) to run asynchronously. +// +//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database +//only after all nodes have finished: +// +// var _ = SynchronizedAfterSuite(func() { +// dbClient.Cleanup() +// }, func() { +// dbRunner.Stop() +// }) +func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool { + globalSuite.SetSynchronizedAfterSuiteNode( + allNodesBody, + node1Body, + codelocation.New(1), + parseTimeout(timeout...), + ) + return true +} + +//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested +//Describe and Context blocks the outermost BeforeEach blocks are run first. +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func BeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustBeforeEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. 
For more details, +//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_) +// +//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func JustAfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested +//Describe and Context blocks the innermost AfterEach blocks are run first. +// +//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts +//a Done channel +func AfterEach(body interface{}, timeout ...float64) bool { + globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...)) + return true +} + +func parseTimeout(timeout ...float64) time.Duration { + if len(timeout) == 0 { + return time.Duration(defaultTimeout * int64(time.Second)) + } else { + return time.Duration(timeout[0] * float64(time.Second)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go new file mode 100644 index 0000000000..fa2f0bf730 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go @@ -0,0 +1,32 @@ +package codelocation + +import ( + "regexp" + "runtime" + "runtime/debug" + "strings" + + "github.com/onsi/ginkgo/types" +) + +func New(skip int) types.CodeLocation { + _, file, line, _ := runtime.Caller(skip + 1) + stackTrace := PruneStack(string(debug.Stack()), skip) + return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} +} + +func PruneStack(fullStackTrace string, skip int) string { + stack := strings.Split(fullStackTrace, "\n") + if len(stack) > 2*(skip+1) { + stack = stack[2*(skip+1):] + } + prunedStack := []string{} + re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) + for i := 0; i < len(stack)/2; i++ { + if !re.Match([]byte(stack[i*2])) { + prunedStack = append(prunedStack, stack[i*2]) + prunedStack = append(prunedStack, stack[i*2+1]) + } + } + return strings.Join(prunedStack, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go new file mode 100644 index 0000000000..0737746dcf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go @@ -0,0 +1,151 @@ +package containernode + +import ( + "math/rand" + "sort" + + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type subjectOrContainerNode struct { + containerNode *ContainerNode + subjectNode leafnodes.SubjectNode +} + +func (n subjectOrContainerNode) text() string { + if n.containerNode != nil { + return n.containerNode.Text() + } else { + return n.subjectNode.Text() + } +} + +type CollatedNodes struct { + Containers []*ContainerNode + Subject leafnodes.SubjectNode +} + +type ContainerNode struct { + text string + flag types.FlagType + codeLocation types.CodeLocation + + setupNodes []leafnodes.BasicNode + subjectAndContainerNodes []subjectOrContainerNode +} + +func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { + return &ContainerNode{ + text: text, + flag: flag, + codeLocation: codeLocation, + } +} + +func (container *ContainerNode) Shuffle(r *rand.Rand) { + 
sort.Sort(container) + permutation := r.Perm(len(container.subjectAndContainerNodes)) + shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) + for i, j := range permutation { + shuffledNodes[i] = container.subjectAndContainerNodes[j] + } + container.subjectAndContainerNodes = shuffledNodes +} + +func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { + if node.flag == types.FlagTypePending { + return false + } + + shouldUnfocus := false + for _, subjectOrContainerNode := range node.subjectAndContainerNodes { + if subjectOrContainerNode.containerNode != nil { + shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus + } else { + shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus + } + } + + if shouldUnfocus { + if node.flag == types.FlagTypeFocused { + node.flag = types.FlagTypeNone + } + return true + } + + return node.flag == types.FlagTypeFocused +} + +func (node *ContainerNode) Collate() []CollatedNodes { + return node.collate([]*ContainerNode{}) +} + +func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { + collated := make([]CollatedNodes, 0) + + containers := make([]*ContainerNode, len(enclosingContainers)) + copy(containers, enclosingContainers) + containers = append(containers, node) + + for _, subjectOrContainer := range node.subjectAndContainerNodes { + if subjectOrContainer.containerNode != nil { + collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) + } else { + collated = append(collated, CollatedNodes{ + Containers: containers, + Subject: subjectOrContainer.subjectNode, + }) + } + } + + return collated +} + +func (node *ContainerNode) PushContainerNode(container *ContainerNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) +} + +func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { + node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) +} + +func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { + node.setupNodes = append(node.setupNodes, setupNode) +} + +func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { + nodes := []leafnodes.BasicNode{} + for _, setupNode := range node.setupNodes { + if setupNode.Type() == nodeType { + nodes = append(nodes, setupNode) + } + } + return nodes +} + +func (node *ContainerNode) Text() string { + return node.text +} + +func (node *ContainerNode) CodeLocation() types.CodeLocation { + return node.codeLocation +} + +func (node *ContainerNode) Flag() types.FlagType { + return node.flag +} + +//sort.Interface + +func (node *ContainerNode) Len() int { + return len(node.subjectAndContainerNodes) +} + +func (node *ContainerNode) Less(i, j int) bool { + return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() +} + +func (node *ContainerNode) Swap(i, j int) { + node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go new file mode 100644 index 0000000000..678ea2514a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go @@ -0,0 +1,92 @@ +package failer + 
+import ( + "fmt" + "sync" + + "github.com/onsi/ginkgo/types" +) + +type Failer struct { + lock *sync.Mutex + failure types.SpecFailure + state types.SpecState +} + +func New() *Failer { + return &Failer{ + lock: &sync.Mutex{}, + state: types.SpecStatePassed, + } +} + +func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStatePanicked + f.failure = types.SpecFailure{ + Message: "Test Panicked", + Location: location, + ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), + } + } +} + +func (f *Failer) Timeout(location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateTimedOut + f.failure = types.SpecFailure{ + Message: "Timed out", + Location: location, + } + } +} + +func (f *Failer) Fail(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateFailed + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} + +func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { + f.lock.Lock() + defer f.lock.Unlock() + + failure := f.failure + outcome := f.state + if outcome != types.SpecStatePassed { + failure.ComponentType = componentType + failure.ComponentIndex = componentIndex + failure.ComponentCodeLocation = componentCodeLocation + } + + f.state = types.SpecStatePassed + f.failure = types.SpecFailure{} + + return failure, outcome +} + +func (f *Failer) Skip(message string, location types.CodeLocation) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.state == types.SpecStatePassed { + f.state = types.SpecStateSkipped + f.failure = types.SpecFailure{ + Message: message, + Location: location, + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go new file mode 100644 index 0000000000..d6d54234c2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go @@ -0,0 +1,103 @@ +package leafnodes + +import ( + "math" + "time" + + "sync" + + "github.com/onsi/ginkgo/types" +) + +type benchmarker struct { + mu sync.Mutex + measurements map[string]*types.SpecMeasurement + orderCounter int +} + +func newBenchmarker() *benchmarker { + return &benchmarker{ + measurements: make(map[string]*types.SpecMeasurement, 0), + } +} + +func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { + t := time.Now() + body() + elapsedTime = time.Since(t) + + b.mu.Lock() + defer b.mu.Unlock() + measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...) + measurement.Results = append(measurement.Results, elapsedTime.Seconds()) + + return +} + +func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { + b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) + defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { + b.mu.Lock() + measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) 
+ defer b.mu.Unlock() + measurement.Results = append(measurement.Results, value) +} + +func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement { + measurement, ok := b.measurements[name] + if !ok { + var computedInfo interface{} + computedInfo = nil + if len(info) > 0 { + computedInfo = info[0] + } + measurement = &types.SpecMeasurement{ + Name: name, + Info: computedInfo, + Order: b.orderCounter, + SmallestLabel: smallestLabel, + LargestLabel: largestLabel, + AverageLabel: averageLabel, + Units: units, + Precision: precision, + Results: make([]float64, 0), + } + b.measurements[name] = measurement + b.orderCounter++ + } + + return measurement +} + +func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { + b.mu.Lock() + defer b.mu.Unlock() + for _, measurement := range b.measurements { + measurement.Smallest = math.MaxFloat64 + measurement.Largest = -math.MaxFloat64 + sum := float64(0) + sumOfSquares := float64(0) + + for _, result := range measurement.Results { + if result > measurement.Largest { + measurement.Largest = result + } + if result < measurement.Smallest { + measurement.Smallest = result + } + sum += result + sumOfSquares += result * result + } + + n := float64(len(measurement.Results)) + measurement.Average = sum / n + measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) + } + + return b.measurements +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go new file mode 100644 index 0000000000..8c3902d601 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go @@ -0,0 +1,19 @@ +package leafnodes + +import ( + "github.com/onsi/ginkgo/types" +) + +type BasicNode interface { + Type() types.SpecComponentType + Run() (types.SpecState, types.SpecFailure) + CodeLocation() types.CodeLocation +} + +type SubjectNode interface { + BasicNode + + Text() string + Flag() types.FlagType + Samples() int +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go new file mode 100644 index 0000000000..6eded7b763 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go @@ -0,0 +1,47 @@ +package leafnodes + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type ItNode struct { + runner *runner + + flag types.FlagType + text string +} + +func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { + return &ItNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), + flag: flag, + text: text, + } +} + +func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *ItNode) Type() types.SpecComponentType { + return types.SpecComponentTypeIt +} + +func (node *ItNode) Text() string { + return node.text +} + +func (node *ItNode) Flag() types.FlagType { + return node.flag +} + +func (node *ItNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *ItNode) Samples() int { + return 1 +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go 
b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go new file mode 100644 index 0000000000..3ab9a6d552 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go @@ -0,0 +1,62 @@ +package leafnodes + +import ( + "reflect" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type MeasureNode struct { + runner *runner + + text string + flag types.FlagType + samples int + benchmarker *benchmarker +} + +func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { + benchmarker := newBenchmarker() + + wrappedBody := func() { + reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) + } + + return &MeasureNode{ + runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), + + text: text, + flag: flag, + samples: samples, + benchmarker: benchmarker, + } +} + +func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { + return node.benchmarker.measurementsReport() +} + +func (node *MeasureNode) Type() types.SpecComponentType { + return types.SpecComponentTypeMeasure +} + +func (node *MeasureNode) Text() string { + return node.text +} + +func (node *MeasureNode) Flag() types.FlagType { + return node.flag +} + +func (node *MeasureNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func (node *MeasureNode) Samples() int { + return node.samples +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go new file mode 100644 index 0000000000..16cb66c3e4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -0,0 +1,117 @@ +package leafnodes + +import ( + "fmt" + "reflect" + "time" + + "github.com/onsi/ginkgo/internal/codelocation" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type runner struct { + isAsync bool + asyncFunc func(chan<- interface{}) + syncFunc func() + codeLocation types.CodeLocation + timeoutThreshold time.Duration + nodeType types.SpecComponentType + componentIndex int + failer *failer.Failer +} + +func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { + bodyType := reflect.TypeOf(body) + if bodyType.Kind() != reflect.Func { + panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) + } + + runner := &runner{ + codeLocation: codeLocation, + timeoutThreshold: timeout, + failer: failer, + nodeType: nodeType, + componentIndex: componentIndex, + } + + switch bodyType.NumIn() { + case 0: + runner.syncFunc = body.(func()) + return runner + case 1: + if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { + panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) + } + + wrappedBody := func(done chan<- interface{}) { + bodyValue := reflect.ValueOf(body) + bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) + } + + runner.isAsync = true + runner.asyncFunc = wrappedBody + return runner + } + + panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) +} + +func (r *runner) run() (outcome types.SpecState, failure 
types.SpecFailure) { + if r.isAsync { + return r.runAsync() + } else { + return r.runSync() + } +} + +func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { + done := make(chan interface{}, 1) + + go func() { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + select { + case <-done: + break + default: + close(done) + } + } + }() + + r.asyncFunc(done) + finished = true + }() + + // If this goroutine gets no CPU time before the select block, + // the <-done case may complete even if the test took longer than the timeoutThreshold. + // This can cause flaky behaviour, but we haven't seen it in the wild. + select { + case <-done: + case <-time.After(r.timeoutThreshold): + r.failer.Timeout(r.codeLocation) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + return +} +func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { + finished := false + + defer func() { + if e := recover(); e != nil || !finished { + r.failer.Panic(codelocation.New(2), e) + } + + failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) + }() + + r.syncFunc() + finished = true + + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go new file mode 100644 index 0000000000..e3e9cb7c58 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go @@ -0,0 +1,48 @@ +package leafnodes + +import ( + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type SetupNode struct { + runner *runner +} + +func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { + return node.runner.run() +} + +func (node *SetupNode) Type() types.SpecComponentType { + return node.runner.nodeType +} + +func (node *SetupNode) CodeLocation() types.CodeLocation { + return node.runner.codeLocation +} + +func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), + } +} + +func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), + } +} + +func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), + } +} + +func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { + return &SetupNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go new file mode 100644 index 0000000000..80f16ed786 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go @@ -0,0 +1,55 @@ +package leafnodes + +import ( + "time" + 
+ "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type SuiteNode interface { + Run(parallelNode int, parallelTotal int, syncHost string) bool + Passed() bool + Summary() *types.SetupSummary +} + +type simpleSuiteNode struct { + runner *runner + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + node.outcome, node.failure = node.runner.run() + node.runTime = time.Since(t) + + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *simpleSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runner.nodeType, + CodeLocation: node.runner.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), + } +} + +func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &simpleSuiteNode{ + runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go new file mode 100644 index 0000000000..a721d0cf7f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go @@ -0,0 +1,90 @@ +package leafnodes + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type synchronizedAfterSuiteNode struct { + runnerA *runner + runnerB *runner + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + return &synchronizedAfterSuiteNode{ + runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), + } +} + +func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + node.outcome, node.failure = node.runnerA.run() + + if parallelNode == 1 { + if parallelTotal > 1 { + node.waitUntilOtherNodesAreDone(syncHost) + } + + outcome, failure := node.runnerB.run() + + if node.outcome == types.SpecStatePassed { + node.outcome, node.failure = outcome, failure + } + } + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { + for { + if node.canRun(syncHost) { + return + } + + 
time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { + resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") + if err != nil || resp.StatusCode != http.StatusOK { + return false + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + resp.Body.Close() + + afterSuiteData := types.RemoteAfterSuiteData{} + err = json.Unmarshal(body, &afterSuiteData) + if err != nil { + return false + } + + return afterSuiteData.CanRun +} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go new file mode 100644 index 0000000000..d5c8893194 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go @@ -0,0 +1,181 @@ +package leafnodes + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "reflect" + "time" + + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/types" +) + +type synchronizedBeforeSuiteNode struct { + runnerA *runner + runnerB *runner + + data []byte + + outcome types.SpecState + failure types.SpecFailure + runTime time.Duration +} + +func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { + node := &synchronizedBeforeSuiteNode{} + + node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) + + return node +} + +func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { + t := time.Now() + defer func() { + node.runTime = time.Since(t) + }() + + if parallelNode == 1 { + node.outcome, node.failure = node.runA(parallelTotal, syncHost) + } else { + node.outcome, node.failure = node.waitForA(syncHost) + } + + if node.outcome != types.SpecStatePassed { + return false + } + node.outcome, node.failure = node.runnerB.run() + + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { + outcome, failure := node.runnerA.run() + + if parallelTotal > 1 { + state := types.RemoteBeforeSuiteStatePassed + if outcome != types.SpecStatePassed { + state = types.RemoteBeforeSuiteStateFailed + } + json := (types.RemoteBeforeSuiteData{ + Data: node.data, + State: state, + }).ToJSON() + http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) + } + + return outcome, failure +} + +func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { + failure := func(message string) types.SpecFailure { + return types.SpecFailure{ + Message: message, + Location: node.runnerA.codeLocation, + ComponentType: node.runnerA.nodeType, + ComponentIndex: node.runnerA.componentIndex, + ComponentCodeLocation: node.runnerA.codeLocation, + } + } + for { + resp, err := http.Get(syncHost + "/BeforeSuiteState") + if err != nil || resp.StatusCode != http.StatusOK { + return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return types.SpecStateFailed, failure("Failed to read BeforeSuite state") + } + resp.Body.Close() + + beforeSuiteData := 
types.RemoteBeforeSuiteData{} + err = json.Unmarshal(body, &beforeSuiteData) + if err != nil { + return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") + } + + switch beforeSuiteData.State { + case types.RemoteBeforeSuiteStatePassed: + node.data = beforeSuiteData.Data + return types.SpecStatePassed, types.SpecFailure{} + case types.RemoteBeforeSuiteStateFailed: + return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") + case types.RemoteBeforeSuiteStateDisappeared: + return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") + } + + time.Sleep(50 * time.Millisecond) + } +} + +func (node *synchronizedBeforeSuiteNode) Passed() bool { + return node.outcome == types.SpecStatePassed +} + +func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { + return &types.SetupSummary{ + ComponentType: node.runnerA.nodeType, + CodeLocation: node.runnerA.codeLocation, + State: node.outcome, + RunTime: node.runTime, + Failure: node.failure, + } +} + +func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { + typeA := reflect.TypeOf(bodyA) + if typeA.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its first argument") + } + + takesNothing := typeA.NumIn() == 0 + takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface + returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 + + if !((takesNothing || takesADoneChannel) && returnsBytes) { + panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") + } + + if takesADoneChannel { + return func(done chan<- interface{}) { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) + node.data = out[0].Interface().([]byte) + } + } + + return func() { + out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) + node.data = out[0].Interface().([]byte) + } +} + +func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { + typeB := reflect.TypeOf(bodyB) + if typeB.Kind() != reflect.Func { + panic("SynchronizedBeforeSuite expects a function as its second argument") + } + + returnsNothing := typeB.NumOut() == 0 + takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 + takesBytesAndDone := typeB.NumIn() == 2 && + typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && + typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface + + if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { + panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") + } + + if takesBytesAndDone { + return func(done chan<- interface{}) { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) + } + } + + return func() { + reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go new file mode 100644 index 0000000000..6b54afe014 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -0,0 +1,249 @@ +/* + +Aggregator is a reporter used by the Ginkgo CLI to aggregate and present 
parallel test output +coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: + + ginkgo -nodes=N + +where N is the number of nodes you desire. +*/ +package remote + +import ( + "time" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type configAndSuite struct { + config config.GinkgoConfigType + summary *types.SuiteSummary +} + +type Aggregator struct { + nodeCount int + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + result chan bool + + suiteBeginnings chan configAndSuite + aggregatedSuiteBeginnings []configAndSuite + + beforeSuites chan *types.SetupSummary + aggregatedBeforeSuites []*types.SetupSummary + + afterSuites chan *types.SetupSummary + aggregatedAfterSuites []*types.SetupSummary + + specCompletions chan *types.SpecSummary + completedSpecs []*types.SpecSummary + + suiteEndings chan *types.SuiteSummary + aggregatedSuiteEndings []*types.SuiteSummary + specs []*types.SpecSummary + + startTime time.Time +} + +func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { + aggregator := &Aggregator{ + nodeCount: nodeCount, + result: result, + config: config, + stenographer: stenographer, + + suiteBeginnings: make(chan configAndSuite, 0), + beforeSuites: make(chan *types.SetupSummary, 0), + afterSuites: make(chan *types.SetupSummary, 0), + specCompletions: make(chan *types.SpecSummary, 0), + suiteEndings: make(chan *types.SuiteSummary, 0), + } + + go aggregator.mux() + + return aggregator +} + +func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + aggregator.suiteBeginnings <- configAndSuite{config, summary} +} + +func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.beforeSuites <- setupSummary +} + +func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + aggregator.afterSuites <- setupSummary +} + +func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { + //noop +} + +func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { + aggregator.specCompletions <- specSummary +} + +func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { + aggregator.suiteEndings <- summary +} + +func (aggregator *Aggregator) mux() { +loop: + for { + select { + case configAndSuite := <-aggregator.suiteBeginnings: + aggregator.registerSuiteBeginning(configAndSuite) + case setupSummary := <-aggregator.beforeSuites: + aggregator.registerBeforeSuite(setupSummary) + case setupSummary := <-aggregator.afterSuites: + aggregator.registerAfterSuite(setupSummary) + case specSummary := <-aggregator.specCompletions: + aggregator.registerSpecCompletion(specSummary) + case suite := <-aggregator.suiteEndings: + finished, passed := aggregator.registerSuiteEnding(suite) + if finished { + aggregator.result <- passed + break loop + } + } + } +} + +func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { + aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) + + if len(aggregator.aggregatedSuiteBeginnings) == 1 { + aggregator.startTime = time.Now() + } + + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, 
configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) + + totalNumberOfSpecs := 0 + if len(aggregator.aggregatedSuiteBeginnings) > 0 { + totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization + } + + aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { + aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { + aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) + aggregator.specs = append(aggregator.specs, specSummary) + aggregator.flushCompletedSpecs() +} + +func (aggregator *Aggregator) flushCompletedSpecs() { + if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { + return + } + + for _, setupSummary := range aggregator.aggregatedBeforeSuites { + aggregator.announceBeforeSuite(setupSummary) + } + + for _, specSummary := range aggregator.completedSpecs { + aggregator.announceSpec(specSummary) + } + + for _, setupSummary := range aggregator.aggregatedAfterSuites { + aggregator.announceAfterSuite(setupSummary) + } + + aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} + aggregator.completedSpecs = []*types.SpecSummary{} + aggregator.aggregatedAfterSuites = []*types.SetupSummary{} +} + +func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { + aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) + if setupSummary.State != types.SpecStatePassed { + aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { + if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + aggregator.stenographer.AnnounceSpecWillRun(specSummary) + } + + aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) + + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct) + } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { + aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct) + } else { + aggregator.stenographer.AnnounceSuccesfulSpec(specSummary) + } + + case types.SpecStatePending: + aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) + case 
types.SpecStateSkipped: + aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace) + case types.SpecStateTimedOut: + aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStatePanicked: + aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + case types.SpecStateFailed: + aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) + } +} + +func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { + aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) + if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { + return false, false + } + + aggregatedSuiteSummary := &types.SuiteSummary{} + aggregatedSuiteSummary.SuiteSucceeded = true + + for _, suiteSummary := range aggregator.aggregatedSuiteEndings { + if suiteSummary.SuiteSucceeded == false { + aggregatedSuiteSummary.SuiteSucceeded = false + } + + aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun + aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs + aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs + aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs + aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs + aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs + aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs + } + + aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) + + aggregator.stenographer.SummarizeFailures(aggregator.specs) + aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) + + return true, aggregatedSuiteSummary.SuiteSucceeded +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go new file mode 100644 index 0000000000..284bc62e5e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go @@ -0,0 +1,147 @@ +package remote + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/reporters/stenographer" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//An interface to net/http's client to allow the injection of fakes under test +type Poster interface { + Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) +} + +/* +The ForwardingReporter is a Ginkgo reporter that forwards information to +a Ginkgo remote server. + +When streaming parallel test output, this repoter is automatically installed by Ginkgo. + +This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner +detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter +in place of Ginkgo's DefaultReporter. 
+*/ + +type ForwardingReporter struct { + serverHost string + poster Poster + outputInterceptor OutputInterceptor + debugMode bool + debugFile *os.File + nestedReporter *reporters.DefaultReporter +} + +func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter { + reporter := &ForwardingReporter{ + serverHost: serverHost, + poster: poster, + outputInterceptor: outputInterceptor, + } + + if debugFile != "" { + var err error + reporter.debugMode = true + reporter.debugFile, err = os.Create(debugFile) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + if !config.Verbose { + //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication. + ginkgoWriter.AndRedirectTo(reporter.debugFile) + } + outputInterceptor.StreamTo(reporter.debugFile) //This is not working + + stenographer := stenographer.New(false, true, reporter.debugFile) + config.Succinct = false + config.Verbose = true + config.FullTrace = true + reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer) + } + + return reporter +} + +func (reporter *ForwardingReporter) post(path string, data interface{}) { + encoded, _ := json.Marshal(data) + buffer := bytes.NewBuffer(encoded) + reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) +} + +func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { + data := struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + }{ + conf, + summary, + } + + reporter.outputInterceptor.StartInterceptingOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteWillBegin(conf, summary) + reporter.debugFile.Sync() + } + reporter.post("/SpecSuiteWillBegin", data) +} + +func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.BeforeSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } + reporter.post("/BeforeSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.debugMode { + reporter.nestedReporter.SpecWillRun(specSummary) + reporter.debugFile.Sync() + } + reporter.post("/SpecWillRun", specSummary) +} + +func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + specSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.SpecDidComplete(specSummary) + reporter.debugFile.Sync() + } + reporter.post("/SpecDidComplete", specSummary) +} + +func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() + reporter.outputInterceptor.StartInterceptingOutput() + setupSummary.CapturedOutput = output + if reporter.debugMode { + reporter.nestedReporter.AfterSuiteDidRun(setupSummary) + reporter.debugFile.Sync() + } + reporter.post("/AfterSuiteDidRun", setupSummary) +} + +func (reporter *ForwardingReporter) 
SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.outputInterceptor.StopInterceptingAndReturnOutput() + if reporter.debugMode { + reporter.nestedReporter.SpecSuiteDidEnd(summary) + reporter.debugFile.Sync() + } + reporter.post("/SpecSuiteDidEnd", summary) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go new file mode 100644 index 0000000000..5154abe87d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go @@ -0,0 +1,13 @@ +package remote + +import "os" + +/* +The OutputInterceptor is used by the ForwardingReporter to +intercept and capture all stdin and stderr output during a test run. +*/ +type OutputInterceptor interface { + StartInterceptingOutput() error + StopInterceptingAndReturnOutput() (string, error) + StreamTo(*os.File) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go new file mode 100644 index 0000000000..ab6622a29c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -0,0 +1,83 @@ +// +build freebsd openbsd netbsd dragonfly darwin linux solaris + +package remote + +import ( + "errors" + "io/ioutil" + "os" + + "github.com/hpcloud/tail" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + redirectFile *os.File + streamTarget *os.File + intercepting bool + tailer *tail.Tail + doneTailing chan bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + var err error + + interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") + if err != nil { + return err + } + + // Call a function in ./syscall_dup_*.go + // If building for everything other than linux_arm64, + // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2) + // call syscall.Dup3(oldfd, newfd, 0). 
They are nearly identical, see: http://linux.die.net/man/2/dup3 + syscallDup(int(interceptor.redirectFile.Fd()), 1) + syscallDup(int(interceptor.redirectFile.Fd()), 2) + + if interceptor.streamTarget != nil { + interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) + interceptor.doneTailing = make(chan bool) + + go func() { + for line := range interceptor.tailer.Lines { + interceptor.streamTarget.Write([]byte(line.Text + "\n")) + } + close(interceptor.doneTailing) + }() + } + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + if !interceptor.intercepting { + return "", errors.New("Not intercepting output!") + } + + interceptor.redirectFile.Close() + output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) + os.Remove(interceptor.redirectFile.Name()) + + interceptor.intercepting = false + + if interceptor.streamTarget != nil { + interceptor.tailer.Stop() + interceptor.tailer.Cleanup() + <-interceptor.doneTailing + interceptor.streamTarget.Sync() + } + + return string(output), err +} + +func (interceptor *outputInterceptor) StreamTo(out *os.File) { + interceptor.streamTarget = out +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go new file mode 100644 index 0000000000..40c790336c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go @@ -0,0 +1,36 @@ +// +build windows + +package remote + +import ( + "errors" + "os" +) + +func NewOutputInterceptor() OutputInterceptor { + return &outputInterceptor{} +} + +type outputInterceptor struct { + intercepting bool +} + +func (interceptor *outputInterceptor) StartInterceptingOutput() error { + if interceptor.intercepting { + return errors.New("Already intercepting output!") + } + interceptor.intercepting = true + + // not working on windows... + + return nil +} + +func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { + // not working on windows... + interceptor.intercepting = false + + return "", nil +} + +func (interceptor *outputInterceptor) StreamTo(*os.File) {} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go new file mode 100644 index 0000000000..367c54daff --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -0,0 +1,224 @@ +/* + +The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. +This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). + +*/ + +package remote + +import ( + "encoding/json" + "io/ioutil" + "net" + "net/http" + "sync" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +/* +Server spins up on an automatically selected port and listens for communication from the forwarding reporter. +It then forwards that communication to attached reporters. 
+*/ +type Server struct { + listener net.Listener + reporters []reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteData types.RemoteBeforeSuiteData + parallelTotal int + counter int +} + +//Create a new server, automatically selecting a port +func NewServer(parallelTotal int) (*Server, error) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + return &Server{ + listener: listener, + lock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}, + parallelTotal: parallelTotal, + }, nil +} + +//Start the server. You don't need to `go s.Start()`, just `s.Start()` +func (server *Server) Start() { + httpServer := &http.Server{} + mux := http.NewServeMux() + httpServer.Handler = mux + + //streaming endpoints + mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) + mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) + mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) + mux.HandleFunc("/SpecWillRun", server.specWillRun) + mux.HandleFunc("/SpecDidComplete", server.specDidComplete) + mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) + + //synchronization endpoints + mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) + mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility + + go httpServer.Serve(server.listener) +} + +//Stop the server +func (server *Server) Close() { + server.listener.Close() +} + +//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
+func (server *Server) Address() string { + return "http://" + server.listener.Addr().String() +} + +// +// Streaming Endpoints +// + +//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +func (server *Server) readAll(request *http.Request) []byte { + defer request.Body.Close() + body, _ := ioutil.ReadAll(request.Body) + return body +} + +func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { + server.reporters = reporters +} + +func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + + var data struct { + Config config.GinkgoConfigType `json:"config"` + Summary *types.SuiteSummary `json:"suite-summary"` + } + + json.Unmarshal(body, &data) + + for _, reporter := range server.reporters { + reporter.SpecSuiteWillBegin(data.Config, data.Summary) + } +} + +func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.BeforeSuiteDidRun(setupSummary) + } +} + +func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var setupSummary *types.SetupSummary + json.Unmarshal(body, &setupSummary) + + for _, reporter := range server.reporters { + reporter.AfterSuiteDidRun(setupSummary) + } +} + +func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecWillRun(specSummary) + } +} + +func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var specSummary *types.SpecSummary + json.Unmarshal(body, &specSummary) + + for _, reporter := range server.reporters { + reporter.SpecDidComplete(specSummary) + } +} + +func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { + body := server.readAll(request) + var suiteSummary *types.SuiteSummary + json.Unmarshal(body, &suiteSummary) + + for _, reporter := range server.reporters { + reporter.SpecSuiteDidEnd(suiteSummary) + } +} + +// +// Synchronization Endpoints +// + +func (server *Server) RegisterAlive(node int, alive func() bool) { + server.lock.Lock() + defer server.lock.Unlock() + server.alives[node-1] = alive +} + +func (server *Server) nodeIsAlive(node int) bool { + server.lock.Lock() + defer server.lock.Unlock() + alive := server.alives[node-1] + if alive == nil { + return true + } + return alive() +} + +func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + if request.Method == "POST" { + dec := json.NewDecoder(request.Body) + dec.Decode(&(server.beforeSuiteData)) + } else { + beforeSuiteData := server.beforeSuiteData + if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { + beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared + } + enc := json.NewEncoder(writer) + enc.Encode(beforeSuiteData) + } +} + +func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { + afterSuiteData := types.RemoteAfterSuiteData{ + CanRun: true, + } + for i := 2; i <= server.parallelTotal; i++ { + afterSuiteData.CanRun = 
afterSuiteData.CanRun && !server.nodeIsAlive(i) + } + + enc := json.NewEncoder(writer) + enc.Encode(afterSuiteData) +} + +func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { + c := spec_iterator.Counter{} + server.lock.Lock() + c.Index = server.counter + server.counter = server.counter + 1 + server.lock.Unlock() + + json.NewEncoder(writer).Encode(c) +} + +func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { + writer.Write([]byte("")) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go new file mode 100644 index 0000000000..9550d37b36 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go @@ -0,0 +1,11 @@ +// +build linux,arm64 + +package remote + +import "syscall" + +// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so +// use the nearly identical syscall.Dup3 instead +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup3(oldfd, newfd, 0) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go new file mode 100644 index 0000000000..75ef7fb78e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go @@ -0,0 +1,9 @@ +// +build solaris + +package remote + +import "golang.org/x/sys/unix" + +func syscallDup(oldfd int, newfd int) (err error) { + return unix.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go new file mode 100644 index 0000000000..ef62559600 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go @@ -0,0 +1,11 @@ +// +build !linux !arm64 +// +build !windows +// +build !solaris + +package remote + +import "syscall" + +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup2(oldfd, newfd) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go new file mode 100644 index 0000000000..7fd68ee8e0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go @@ -0,0 +1,247 @@ +package spec + +import ( + "fmt" + "io" + "time" + + "sync" + + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/types" +) + +type Spec struct { + subject leafnodes.SubjectNode + focused bool + announceProgress bool + + containers []*containernode.ContainerNode + + state types.SpecState + runTime time.Duration + startTime time.Time + failure types.SpecFailure + previousFailures bool + + stateMutex *sync.Mutex +} + +func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { + spec := &Spec{ + subject: subject, + containers: containers, + focused: subject.Flag() == types.FlagTypeFocused, + announceProgress: announceProgress, + stateMutex: &sync.Mutex{}, + } + + spec.processFlag(subject.Flag()) + for i := len(containers) - 1; i >= 0; i-- { + spec.processFlag(containers[i].Flag()) + } + + return spec +} + +func (spec *Spec) processFlag(flag types.FlagType) { + if flag == types.FlagTypeFocused { + spec.focused = true + } else if flag == types.FlagTypePending { + spec.setState(types.SpecStatePending) + } +} + +func (spec *Spec) Skip() { + spec.setState(types.SpecStateSkipped) +} + 
+func (spec *Spec) Failed() bool { + return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut +} + +func (spec *Spec) Passed() bool { + return spec.getState() == types.SpecStatePassed +} + +func (spec *Spec) Flaked() bool { + return spec.getState() == types.SpecStatePassed && spec.previousFailures +} + +func (spec *Spec) Pending() bool { + return spec.getState() == types.SpecStatePending +} + +func (spec *Spec) Skipped() bool { + return spec.getState() == types.SpecStateSkipped +} + +func (spec *Spec) Focused() bool { + return spec.focused +} + +func (spec *Spec) IsMeasurement() bool { + return spec.subject.Type() == types.SpecComponentTypeMeasure +} + +func (spec *Spec) Summary(suiteID string) *types.SpecSummary { + componentTexts := make([]string, len(spec.containers)+1) + componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) + + for i, container := range spec.containers { + componentTexts[i] = container.Text() + componentCodeLocations[i] = container.CodeLocation() + } + + componentTexts[len(spec.containers)] = spec.subject.Text() + componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() + + runTime := spec.runTime + if runTime == 0 && !spec.startTime.IsZero() { + runTime = time.Since(spec.startTime) + } + + return &types.SpecSummary{ + IsMeasurement: spec.IsMeasurement(), + NumberOfSamples: spec.subject.Samples(), + ComponentTexts: componentTexts, + ComponentCodeLocations: componentCodeLocations, + State: spec.getState(), + RunTime: runTime, + Failure: spec.failure, + Measurements: spec.measurementsReport(), + SuiteID: suiteID, + } +} + +func (spec *Spec) ConcatenatedString() string { + s := "" + for _, container := range spec.containers { + s += container.Text() + " " + } + + return s + spec.subject.Text() +} + +func (spec *Spec) Run(writer io.Writer) { + if spec.getState() == types.SpecStateFailed { + spec.previousFailures = true + } + + spec.startTime = time.Now() + defer func() { + spec.runTime = time.Since(spec.startTime) + }() + + for sample := 0; sample < spec.subject.Samples(); sample++ { + spec.runSample(sample, writer) + + if spec.getState() != types.SpecStatePassed { + return + } + } +} + +func (spec *Spec) getState() types.SpecState { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + return spec.state +} + +func (spec *Spec) setState(state types.SpecState) { + spec.stateMutex.Lock() + defer spec.stateMutex.Unlock() + spec.state = state +} + +func (spec *Spec) runSample(sample int, writer io.Writer) { + spec.setState(types.SpecStatePassed) + spec.failure = types.SpecFailure{} + innerMostContainerIndexToUnwind := -1 + + defer func() { + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) { + spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach) + justAfterEachState, justAfterEachFailure := justAfterEach.Run() + if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { + spec.state = justAfterEachState + spec.failure = justAfterEachFailure + } + } + } + + for i := innerMostContainerIndexToUnwind; i >= 0; i-- { + container := spec.containers[i] + for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { + spec.announceSetupNode(writer, "AfterEach", container, afterEach) + afterEachState, afterEachFailure := afterEach.Run() + if 
afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed { + spec.setState(afterEachState) + spec.failure = afterEachFailure + } + } + } + }() + + for i, container := range spec.containers { + innerMostContainerIndexToUnwind = i + for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { + spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) + s, f := beforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { + return + } + } + } + + for _, container := range spec.containers { + for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { + spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) + s, f := justBeforeEach.Run() + spec.failure = f + spec.setState(s) + if spec.getState() != types.SpecStatePassed { + return + } + } + } + + spec.announceSubject(writer, spec.subject) + s, f := spec.subject.Run() + spec.failure = f + spec.setState(s) +} + +func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { + if spec.announceProgress { + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { + if spec.announceProgress { + nodeType := "" + switch subject.Type() { + case types.SpecComponentTypeIt: + nodeType = "It" + case types.SpecComponentTypeMeasure: + nodeType = "Measure" + } + s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) + writer.Write([]byte(s)) + } +} + +func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { + if !spec.IsMeasurement() || spec.Failed() { + return map[string]*types.SpecMeasurement{} + } + + return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go new file mode 100644 index 0000000000..27c0d1d6cd --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -0,0 +1,144 @@ +package spec + +import ( + "math/rand" + "regexp" + "sort" +) + +type Specs struct { + specs []*Spec + names []string + + hasProgrammaticFocus bool + RegexScansFilePath bool +} + +func NewSpecs(specs []*Spec) *Specs { + names := make([]string, len(specs)) + for i, spec := range specs { + names[i] = spec.ConcatenatedString() + } + return &Specs{ + specs: specs, + names: names, + } +} + +func (e *Specs) Specs() []*Spec { + return e.specs +} + +func (e *Specs) HasProgrammaticFocus() bool { + return e.hasProgrammaticFocus +} + +func (e *Specs) Shuffle(r *rand.Rand) { + sort.Sort(e) + permutation := r.Perm(len(e.specs)) + shuffledSpecs := make([]*Spec, len(e.specs)) + names := make([]string, len(e.specs)) + for i, j := range permutation { + shuffledSpecs[i] = e.specs[j] + names[i] = e.names[j] + } + e.specs = shuffledSpecs + e.names = names +} + +func (e *Specs) ApplyFocus(description string, focusString string, skipString string) { + if focusString == "" && skipString == "" { + e.applyProgrammaticFocus() + } else { + e.applyRegExpFocusAndSkip(description, focusString, skipString) + } +} + +func (e *Specs) applyProgrammaticFocus() { + e.hasProgrammaticFocus = false + for _, spec := range e.specs { + if spec.Focused() && !spec.Pending() { + e.hasProgrammaticFocus = true + break + 
} + } + + if e.hasProgrammaticFocus { + for _, spec := range e.specs { + if !spec.Focused() { + spec.Skip() + } + } + } +} + +// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function, +// this is the place which we append to. +func (e *Specs) toMatch(description string, i int) []byte { + if i > len(e.names) { + return nil + } + if e.RegexScansFilePath { + return []byte( + description + " " + + e.names[i] + " " + + e.specs[i].subject.CodeLocation().FileName) + } else { + return []byte( + description + " " + + e.names[i]) + } +} + +func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) { + var focusFilter *regexp.Regexp + if focusString != "" { + focusFilter = regexp.MustCompile(focusString) + } + var skipFilter *regexp.Regexp + if skipString != "" { + skipFilter = regexp.MustCompile(skipString) + } + + for i, spec := range e.specs { + matchesFocus := true + matchesSkip := false + + toMatch := e.toMatch(description, i) + + if focusFilter != nil { + matchesFocus = focusFilter.Match([]byte(toMatch)) + } + + if skipFilter != nil { + matchesSkip = skipFilter.Match([]byte(toMatch)) + } + + if !matchesFocus || matchesSkip { + spec.Skip() + } + } +} + +func (e *Specs) SkipMeasurements() { + for _, spec := range e.specs { + if spec.IsMeasurement() { + spec.Skip() + } + } +} + +//sort.Interface + +func (e *Specs) Len() int { + return len(e.specs) +} + +func (e *Specs) Less(i, j int) bool { + return e.names[i] < e.names[j] +} + +func (e *Specs) Swap(i, j int) { + e.names[i], e.names[j] = e.names[j], e.names[i] + e.specs[i], e.specs[j] = e.specs[j], e.specs[i] +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go new file mode 100644 index 0000000000..82272554aa --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go @@ -0,0 +1,55 @@ +package spec_iterator + +func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { + if length == 0 { + return 0, 0 + } + + // We have more nodes than tests. Trivial case. 
+ if parallelTotal >= length { + if parallelNode > length { + return 0, 0 + } else { + return parallelNode - 1, 1 + } + } + + // This is the minimum amount of tests that a node will be required to run + minTestsPerNode := length / parallelTotal + + // This is the maximum amount of tests that a node will be required to run + // The algorithm guarantees that this would be equal to at least the minimum amount + // and at most one more + maxTestsPerNode := minTestsPerNode + if length%parallelTotal != 0 { + maxTestsPerNode++ + } + + // Number of nodes that will have to run the maximum amount of tests per node + numMaxLoadNodes := length % parallelTotal + + // Number of nodes that precede the current node and will have to run the maximum amount of tests per node + var numPrecedingMaxLoadNodes int + if parallelNode > numMaxLoadNodes { + numPrecedingMaxLoadNodes = numMaxLoadNodes + } else { + numPrecedingMaxLoadNodes = parallelNode - 1 + } + + // Number of nodes that precede the current node and will have to run the minimum amount of tests per node + var numPrecedingMinLoadNodes int + if parallelNode <= numMaxLoadNodes { + numPrecedingMinLoadNodes = 0 + } else { + numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1 + } + + // Evaluate the test start index and number of tests to run + startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode + if parallelNode > numMaxLoadNodes { + count = minTestsPerNode + } else { + count = maxTestsPerNode + } + return +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go new file mode 100644 index 0000000000..99f548bca4 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -0,0 +1,59 @@ +package spec_iterator + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/onsi/ginkgo/internal/spec" +) + +type ParallelIterator struct { + specs []*spec.Spec + host string + client *http.Client +} + +func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { + return &ParallelIterator{ + specs: specs, + host: host, + client: &http.Client{}, + } +} + +func (s *ParallelIterator) Next() (*spec.Spec, error) { + resp, err := s.client.Get(s.host + "/counter") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) + } + + var counter Counter + err = json.NewDecoder(resp.Body).Decode(&counter) + if err != nil { + return nil, err + } + + if counter.Index >= len(s.specs) { + return nil, ErrClosed + } + + return s.specs[counter.Index], nil +} + +func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return -1, false +} + +func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + return -1, false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go new file mode 100644 index 0000000000..a51c93b8b6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go @@ -0,0 +1,45 @@ +package spec_iterator + +import ( + "github.com/onsi/ginkgo/internal/spec" +) + +type SerialIterator struct { + specs []*spec.Spec + index int +} + +func 
NewSerialIterator(specs []*spec.Spec) *SerialIterator { + return &SerialIterator{ + specs: specs, + index: 0, + } +} + +func (s *SerialIterator) Next() (*spec.Spec, error) { + if s.index >= len(s.specs) { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return len(s.specs), true +} + +func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for _, s := range s.specs { + if !s.Skipped() && !s.Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go new file mode 100644 index 0000000000..ad4a3ea3c6 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go @@ -0,0 +1,47 @@ +package spec_iterator + +import "github.com/onsi/ginkgo/internal/spec" + +type ShardedParallelIterator struct { + specs []*spec.Spec + index int + maxIndex int +} + +func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { + startIndex, count := ParallelizedIndexRange(len(specs), total, node) + + return &ShardedParallelIterator{ + specs: specs, + index: startIndex, + maxIndex: startIndex + count, + } +} + +func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { + if s.index >= s.maxIndex { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return s.maxIndex - s.index, true +} + +func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for i := s.index; i < s.maxIndex; i += 1 { + if !s.specs[i].Skipped() && !s.specs[i].Pending() { + count += 1 + } + } + return count, true +} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go new file mode 100644 index 0000000000..74bffad646 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go @@ -0,0 +1,20 @@ +package spec_iterator + +import ( + "errors" + + "github.com/onsi/ginkgo/internal/spec" +) + +var ErrClosed = errors.New("no more specs to run") + +type SpecIterator interface { + Next() (*spec.Spec, error) + NumberOfSpecsPriorToIteration() int + NumberOfSpecsToProcessIfKnown() (int, bool) + NumberOfSpecsThatWillBeRunIfKnown() (int, bool) +} + +type Counter struct { + Index int `json:"index"` +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go new file mode 100644 index 0000000000..a0b8b62d52 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go @@ -0,0 +1,15 @@ +package specrunner + +import ( + "crypto/rand" + "fmt" +) + +func randomID() string { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + return "" + } + return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8]) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go 
new file mode 100644 index 0000000000..2c683cb8b9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -0,0 +1,411 @@ +package specrunner + +import ( + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + Writer "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" + + "time" +) + +type SpecRunner struct { + description string + beforeSuiteNode leafnodes.SuiteNode + iterator spec_iterator.SpecIterator + afterSuiteNode leafnodes.SuiteNode + reporters []reporters.Reporter + startTime time.Time + suiteID string + runningSpec *spec.Spec + writer Writer.WriterInterface + config config.GinkgoConfigType + interrupted bool + processedSpecs []*spec.Spec + lock *sync.Mutex +} + +func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { + return &SpecRunner{ + description: description, + beforeSuiteNode: beforeSuiteNode, + iterator: iterator, + afterSuiteNode: afterSuiteNode, + reporters: reporters, + writer: writer, + config: config, + suiteID: randomID(), + lock: &sync.Mutex{}, + } +} + +func (runner *SpecRunner) Run() bool { + if runner.config.DryRun { + runner.performDryRun() + return true + } + + runner.reportSuiteWillBegin() + signalRegistered := make(chan struct{}) + go runner.registerForInterrupts(signalRegistered) + <-signalRegistered + + suitePassed := runner.runBeforeSuite() + + if suitePassed { + suitePassed = runner.runSpecs() + } + + runner.blockForeverIfInterrupted() + + suitePassed = runner.runAfterSuite() && suitePassed + + runner.reportSuiteDidEnd(suitePassed) + + return suitePassed +} + +func (runner *SpecRunner) performDryRun() { + runner.reportSuiteWillBegin() + + if runner.beforeSuiteNode != nil { + summary := runner.beforeSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportBeforeSuite(summary) + } + + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + summary := spec.Summary(runner.suiteID) + runner.reportSpecWillRun(summary) + if summary.State == types.SpecStateInvalid { + summary.State = types.SpecStatePassed + } + runner.reportSpecDidComplete(summary, false) + } + + if runner.afterSuiteNode != nil { + summary := runner.afterSuiteNode.Summary() + summary.State = types.SpecStatePassed + runner.reportAfterSuite(summary) + } + + runner.reportSuiteDidEnd(true) +} + +func (runner *SpecRunner) runBeforeSuite() bool { + if runner.beforeSuiteNode == nil || runner.wasInterrupted() { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if !passed { + runner.writer.DumpOut() + } + runner.reportBeforeSuite(runner.beforeSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runAfterSuite() bool { + if runner.afterSuiteNode == nil { + return true + } + + runner.writer.Truncate() + conf := runner.config + passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost) + if 
!passed { + runner.writer.DumpOut() + } + runner.reportAfterSuite(runner.afterSuiteNode.Summary()) + return passed +} + +func (runner *SpecRunner) runSpecs() bool { + suiteFailed := false + skipRemainingSpecs := false + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + suiteFailed = true + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + + if runner.wasInterrupted() { + break + } + if skipRemainingSpecs { + spec.Skip() + } + + if !spec.Skipped() && !spec.Pending() { + if passed := runner.runSpec(spec); !passed { + suiteFailed = true + } + } else if spec.Pending() && runner.config.FailOnPending { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + suiteFailed = true + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } else { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + } + + if spec.Failed() && runner.config.FailFast { + skipRemainingSpecs = true + } + } + + return !suiteFailed +} + +func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) { + maxAttempts := 1 + if runner.config.FlakeAttempts > 0 { + // uninitialized configs count as 1 + maxAttempts = runner.config.FlakeAttempts + } + + for i := 0; i < maxAttempts; i++ { + runner.reportSpecWillRun(spec.Summary(runner.suiteID)) + runner.runningSpec = spec + spec.Run(runner.writer) + runner.runningSpec = nil + runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed()) + if !spec.Failed() { + return true + } + } + return false +} + +func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) { + if runner.runningSpec == nil { + return nil, false + } + + return runner.runningSpec.Summary(runner.suiteID), true +} + +func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + close(signalRegistered) + + <-c + signal.Stop(c) + runner.markInterrupted() + go runner.registerForHardInterrupts() + runner.writer.DumpOutWithHeader(` +Received interrupt. Emitting contents of GinkgoWriter... +--------------------------------------------------------- +`) + if runner.afterSuiteNode != nil { + fmt.Fprint(os.Stderr, ` +--------------------------------------------------------- +Received interrupt. Running AfterSuite... +^C again to terminate immediately +`) + runner.runAfterSuite() + } + runner.reportSuiteDidEnd(false) + os.Exit(1) +} + +func (runner *SpecRunner) registerForHardInterrupts() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + + <-c + fmt.Fprintln(os.Stderr, "\nReceived second interrupt. 
Shutting down.") + os.Exit(1) +} + +func (runner *SpecRunner) blockForeverIfInterrupted() { + runner.lock.Lock() + interrupted := runner.interrupted + runner.lock.Unlock() + + if interrupted { + select {} + } +} + +func (runner *SpecRunner) markInterrupted() { + runner.lock.Lock() + defer runner.lock.Unlock() + runner.interrupted = true +} + +func (runner *SpecRunner) wasInterrupted() bool { + runner.lock.Lock() + defer runner.lock.Unlock() + return runner.interrupted +} + +func (runner *SpecRunner) reportSuiteWillBegin() { + runner.startTime = time.Now() + summary := runner.suiteWillBeginSummary() + for _, reporter := range runner.reporters { + reporter.SpecSuiteWillBegin(runner.config, summary) + } +} + +func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.BeforeSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) { + for _, reporter := range runner.reporters { + reporter.AfterSuiteDidRun(summary) + } +} + +func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { + runner.writer.Truncate() + + for _, reporter := range runner.reporters { + reporter.SpecWillRun(summary) + } +} + +func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { + if failed && len(summary.CapturedOutput) == 0 { + summary.CapturedOutput = string(runner.writer.Bytes()) + } + for i := len(runner.reporters) - 1; i >= 1; i-- { + runner.reporters[i].SpecDidComplete(summary) + } + + if failed { + runner.writer.DumpOut() + } + + runner.reporters[0].SpecDidComplete(summary) +} + +func (runner *SpecRunner) reportSuiteDidEnd(success bool) { + summary := runner.suiteDidEndSummary(success) + summary.RunTime = time.Since(runner.startTime) + for _, reporter := range runner.reporters { + reporter.SpecSuiteDidEnd(summary) + } +} + +func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { + count = 0 + + for _, spec := range runner.processedSpecs { + if filter(spec) { + count++ + } + } + + return count +} + +func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { + numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return !ex.Skipped() && !ex.Pending() + }) + + numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Pending() + }) + + numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Skipped() + }) + + numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Passed() + }) + + numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Flaked() + }) + + numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { + return ex.Failed() + }) + + if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { + var known bool + numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() + } + numberOfFailedSpecs = numberOfSpecsThatWillBeRun + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteSucceeded: success, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: len(runner.processedSpecs), + 
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, + NumberOfPendingSpecs: numberOfPendingSpecs, + NumberOfSkippedSpecs: numberOfSkippedSpecs, + NumberOfPassedSpecs: numberOfPassedSpecs, + NumberOfFailedSpecs: numberOfFailedSpecs, + NumberOfFlakedSpecs: numberOfFlakedSpecs, + } +} + +func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { + numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() + if !known { + numTotal = -1 + } + + numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numToRun = -1 + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: numTotal, + NumberOfSpecsThatWillBeRun: numToRun, + NumberOfPendingSpecs: -1, + NumberOfSkippedSpecs: -1, + NumberOfPassedSpecs: -1, + NumberOfFailedSpecs: -1, + NumberOfFlakedSpecs: -1, + } +} diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go new file mode 100644 index 0000000000..3104bbc88c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -0,0 +1,190 @@ +package suite + +import ( + "math/rand" + "net/http" + "time" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/internal/containernode" + "github.com/onsi/ginkgo/internal/failer" + "github.com/onsi/ginkgo/internal/leafnodes" + "github.com/onsi/ginkgo/internal/spec" + "github.com/onsi/ginkgo/internal/specrunner" + "github.com/onsi/ginkgo/internal/writer" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" +) + +type ginkgoTestingT interface { + Fail() +} + +type Suite struct { + topLevelContainer *containernode.ContainerNode + currentContainer *containernode.ContainerNode + containerIndex int + beforeSuiteNode leafnodes.SuiteNode + afterSuiteNode leafnodes.SuiteNode + runner *specrunner.SpecRunner + failer *failer.Failer + running bool +} + +func New(failer *failer.Failer) *Suite { + topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{}) + + return &Suite{ + topLevelContainer: topLevelContainer, + currentContainer: topLevelContainer, + failer: failer, + containerIndex: 1, + } +} + +func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) { + if config.ParallelTotal < 1 { + panic("ginkgo.parallel.total must be >= 1") + } + + if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 { + panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total") + } + + r := rand.New(rand.NewSource(config.RandomSeed)) + suite.topLevelContainer.Shuffle(r) + iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) + suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config) + + suite.running = true + success := suite.runner.Run() + if !success { + t.Fail() + } + return success, hasProgrammaticFocus +} + +func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { + specsSlice := []*spec.Spec{} + suite.topLevelContainer.BackPropagateProgrammaticFocus() + for _, collatedNodes := range suite.topLevelContainer.Collate() { + specsSlice = append(specsSlice, 
spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress)) + } + + specs := spec.NewSpecs(specsSlice) + specs.RegexScansFilePath = config.RegexScansFilePath + + if config.RandomizeAllSpecs { + specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed))) + } + + specs.ApplyFocus(description, config.FocusString, config.SkipString) + + if config.SkipMeasurements { + specs.SkipMeasurements() + } + + var iterator spec_iterator.SpecIterator + + if config.ParallelTotal > 1 { + iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) + resp, err := http.Get(config.SyncHost + "/has-counter") + if err != nil || resp.StatusCode != http.StatusOK { + iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) + } + } else { + iterator = spec_iterator.NewSerialIterator(specs.Specs()) + } + + return iterator, specs.HasProgrammaticFocus() +} + +func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { + return suite.runner.CurrentSpecSummary() +} + +func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.beforeSuiteNode != nil { + panic("You may only call BeforeSuite once!") + } + suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.afterSuiteNode != nil { + panic("You may only call AfterSuite once!") + } + suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer) +} + +func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) { + container := containernode.New(text, flag, codeLocation) + suite.currentContainer.PushContainerNode(container) + + previousContainer := suite.currentContainer + suite.currentContainer = container + suite.containerIndex++ + + body() + + suite.containerIndex-- + suite.currentContainer = previousContainer +} + +func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) { + if suite.running { + suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, 
body, flag, codeLocation, samples, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} + +func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) { + if suite.running { + suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation) + } + suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex)) +} diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go new file mode 100644 index 0000000000..090445d084 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go @@ -0,0 +1,76 @@ +package testingtproxy + +import ( + "fmt" + "io" +) + +type failFunc func(message string, callerSkip ...int) + +func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy { + return &ginkgoTestingTProxy{ + fail: fail, + offset: offset, + writer: writer, + } +} + +type ginkgoTestingTProxy struct { + fail failFunc + offset int + writer io.Writer +} + +func (t *ginkgoTestingTProxy) Error(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fail() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) FailNow() { + t.fail("failed", t.offset) +} + +func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { + t.fail(fmt.Sprintln(args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { + t.fail(fmt.Sprintf(format, args...), t.offset) +} + +func (t *ginkgoTestingTProxy) Log(args ...interface{}) { + fmt.Fprintln(t.writer, args...) +} + +func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { + t.Log(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) Failed() bool { + return false +} + +func (t *ginkgoTestingTProxy) Parallel() { +} + +func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { + fmt.Println(args...) 
+} + +func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { + t.Skip(fmt.Sprintf(format, args...)) +} + +func (t *ginkgoTestingTProxy) SkipNow() { +} + +func (t *ginkgoTestingTProxy) Skipped() bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go new file mode 100644 index 0000000000..6739c3f605 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go @@ -0,0 +1,36 @@ +package writer + +type FakeGinkgoWriter struct { + EventStream []string +} + +func NewFake() *FakeGinkgoWriter { + return &FakeGinkgoWriter{ + EventStream: []string{}, + } +} + +func (writer *FakeGinkgoWriter) AddEvent(event string) { + writer.EventStream = append(writer.EventStream, event) +} + +func (writer *FakeGinkgoWriter) Truncate() { + writer.EventStream = append(writer.EventStream, "TRUNCATE") +} + +func (writer *FakeGinkgoWriter) DumpOut() { + writer.EventStream = append(writer.EventStream, "DUMP") +} + +func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { + writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) +} + +func (writer *FakeGinkgoWriter) Bytes() []byte { + writer.EventStream = append(writer.EventStream, "BYTES") + return nil +} + +func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { + return 0, nil +} diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go new file mode 100644 index 0000000000..98eca3bdd2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -0,0 +1,89 @@ +package writer + +import ( + "bytes" + "io" + "sync" +) + +type WriterInterface interface { + io.Writer + + Truncate() + DumpOut() + DumpOutWithHeader(header string) + Bytes() []byte +} + +type Writer struct { + buffer *bytes.Buffer + outWriter io.Writer + lock *sync.Mutex + stream bool + redirector io.Writer +} + +func New(outWriter io.Writer) *Writer { + return &Writer{ + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + stream: true, + } +} + +func (w *Writer) AndRedirectTo(writer io.Writer) { + w.redirector = writer +} + +func (w *Writer) SetStream(stream bool) { + w.lock.Lock() + defer w.lock.Unlock() + w.stream = stream +} + +func (w *Writer) Write(b []byte) (n int, err error) { + w.lock.Lock() + defer w.lock.Unlock() + + n, err = w.buffer.Write(b) + if w.redirector != nil { + w.redirector.Write(b) + } + if w.stream { + return w.outWriter.Write(b) + } + return n, err +} + +func (w *Writer) Truncate() { + w.lock.Lock() + defer w.lock.Unlock() + w.buffer.Reset() +} + +func (w *Writer) DumpOut() { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream { + w.buffer.WriteTo(w.outWriter) + } +} + +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + +func (w *Writer) DumpOutWithHeader(header string) { + w.lock.Lock() + defer w.lock.Unlock() + if !w.stream && w.buffer.Len() > 0 { + w.outWriter.Write([]byte(header)) + w.buffer.WriteTo(w.outWriter) + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go new file mode 100644 index 0000000000..ac58dd5f7a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -0,0 +1,84 @@ +/* +Ginkgo's Default Reporter + +A number of command line flags are 
available to tweak Ginkgo's default output. + +These are documented [here](http://onsi.github.io/ginkgo/#running_tests) +*/ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters/stenographer" + "github.com/onsi/ginkgo/types" +) + +type DefaultReporter struct { + config config.DefaultReporterConfigType + stenographer stenographer.Stenographer + specSummaries []*types.SpecSummary +} + +func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { + return &DefaultReporter{ + config: config, + stenographer: stenographer, + } +} + +func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) + if config.ParallelTotal > 1 { + reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) + } +} + +func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) + } +} + +func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { + if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { + reporter.stenographer.AnnounceSpecWillRun(specSummary) + } +} + +func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { + switch specSummary.State { + case types.SpecStatePassed: + if specSummary.IsMeasurement { + reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct) + } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { + reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceSuccesfulSpec(specSummary) + } + case types.SpecStatePending: + reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) + case types.SpecStateSkipped: + reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace) + case types.SpecStateTimedOut: + reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStatePanicked: + reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + case types.SpecStateFailed: + reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) + } + + reporter.specSummaries = append(reporter.specSummaries, specSummary) +} + +func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.stenographer.SummarizeFailures(reporter.specSummaries) + 
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go new file mode 100644 index 0000000000..27db479490 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go @@ -0,0 +1,59 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +//FakeReporter is useful for testing purposes +type FakeReporter struct { + Config config.GinkgoConfigType + + BeginSummary *types.SuiteSummary + BeforeSuiteSummary *types.SetupSummary + SpecWillRunSummaries []*types.SpecSummary + SpecSummaries []*types.SpecSummary + AfterSuiteSummary *types.SetupSummary + EndSummary *types.SuiteSummary + + SpecWillRunStub func(specSummary *types.SpecSummary) + SpecDidCompleteStub func(specSummary *types.SpecSummary) +} + +func NewFakeReporter() *FakeReporter { + return &FakeReporter{ + SpecWillRunSummaries: make([]*types.SpecSummary, 0), + SpecSummaries: make([]*types.SpecSummary, 0), + } +} + +func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + fakeR.Config = config + fakeR.BeginSummary = summary +} + +func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.BeforeSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { + if fakeR.SpecWillRunStub != nil { + fakeR.SpecWillRunStub(specSummary) + } + fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) +} + +func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { + if fakeR.SpecDidCompleteStub != nil { + fakeR.SpecDidCompleteStub(specSummary) + } + fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) +} + +func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + fakeR.AfterSuiteSummary = setupSummary +} + +func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fakeR.EndSummary = summary +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go new file mode 100644 index 0000000000..2c9f3c7929 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -0,0 +1,152 @@ +/* + +JUnit XML Reporter for Ginkgo + +For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output + +*/ + +package reporters + +import ( + "encoding/xml" + "fmt" + "math" + "os" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + TestCases []JUnitTestCase `xml:"testcase"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time float64 `xml:"time,attr"` +} + +type JUnitTestCase struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + Time float64 `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` +} + +type JUnitFailureMessage struct { + Type string `xml:"type,attr"` + Message string `xml:",chardata"` +} + +type JUnitSkipped struct { + XMLName xml.Name `xml:"skipped"` +} + +type JUnitReporter struct { + suite JUnitTestSuite + filename string + testSuiteName string +} + +//NewJUnitReporter creates a 
new JUnit XML reporter. The XML will be stored in the passed in filename. +func NewJUnitReporter(filename string) *JUnitReporter { + return &JUnitReporter{ + filename: filename, + } +} + +func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.suite = JUnitTestSuite{ + Name: summary.SuiteDescription, + TestCases: []JUnitTestCase{}, + } + reporter.testSuiteName = summary.SuiteDescription +} + +func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { +} + +func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func failureMessage(failure types.SpecFailure) string { + return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String()) +} + +func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testCase := JUnitTestCase{ + Name: name, + ClassName: reporter.testSuiteName, + } + + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(setupSummary.State), + Message: failureMessage(setupSummary.Failure), + } + testCase.SystemOut = setupSummary.CapturedOutput + testCase.Time = setupSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) + } +} + +func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testCase := JUnitTestCase{ + Name: strings.Join(specSummary.ComponentTexts[1:], " "), + ClassName: reporter.testSuiteName, + } + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + testCase.FailureMessage = &JUnitFailureMessage{ + Type: reporter.failureTypeForState(specSummary.State), + Message: failureMessage(specSummary.Failure), + } + testCase.SystemOut = specSummary.CapturedOutput + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + testCase.Skipped = &JUnitSkipped{} + } + testCase.Time = specSummary.RunTime.Seconds() + reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) +} + +func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun + reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 + reporter.suite.Failures = summary.NumberOfFailedSpecs + reporter.suite.Errors = 0 + file, err := os.Create(reporter.filename) + if err != nil { + fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error()) + } + defer file.Close() + file.WriteString(xml.Header) + encoder := xml.NewEncoder(file) + encoder.Indent(" ", " ") + err = encoder.Encode(reporter.suite) + if err != nil { + fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error()) + } +} + +func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { + switch state { + case types.SpecStateFailed: + return "Failure" + case types.SpecStateTimedOut: + return "Timeout" + case types.SpecStatePanicked: + return "Panic" + default: + return "" + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go new file mode 
100644 index 0000000000..348b9dfce1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/reporter.go @@ -0,0 +1,15 @@ +package reporters + +import ( + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +type Reporter interface { + SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) + BeforeSuiteDidRun(setupSummary *types.SetupSummary) + SpecWillRun(specSummary *types.SpecSummary) + SpecDidComplete(specSummary *types.SpecSummary) + AfterSuiteDidRun(setupSummary *types.SetupSummary) + SpecSuiteDidEnd(summary *types.SuiteSummary) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go new file mode 100644 index 0000000000..45b8f88690 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go @@ -0,0 +1,64 @@ +package stenographer + +import ( + "fmt" + "strings" +) + +func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { + var out string + + if len(args) > 0 { + out = fmt.Sprintf(format, args...) + } else { + out = format + } + + if s.color { + return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) + } else { + return out + } +} + +func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { + fmt.Fprintln(s.w, text) + fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) +} + +func (s *consoleStenographer) printNewLine() { + fmt.Fprintln(s.w, "") +} + +func (s *consoleStenographer) printDelimiter() { + fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) +} + +func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { + fmt.Fprint(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { + fmt.Fprintln(s.w, s.indent(indentation, format, args...)) +} + +func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { + var text string + + if len(args) > 0 { + text = fmt.Sprintf(format, args...) 
+ } else { + text = format + } + + stringArray := strings.Split(text, "\n") + padding := "" + if indentation >= 0 { + padding = strings.Repeat(" ", indentation) + } + for i, s := range stringArray { + stringArray[i] = fmt.Sprintf("%s%s", padding, s) + } + + return strings.Join(stringArray, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go new file mode 100644 index 0000000000..98854e7d9a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -0,0 +1,142 @@ +package stenographer + +import ( + "sync" + + "github.com/onsi/ginkgo/types" +) + +func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { + return FakeStenographerCall{ + Method: method, + Args: args, + } +} + +type FakeStenographer struct { + calls []FakeStenographerCall + lock *sync.Mutex +} + +type FakeStenographerCall struct { + Method string + Args []interface{} +} + +func NewFakeStenographer() *FakeStenographer { + stenographer := &FakeStenographer{ + lock: &sync.Mutex{}, + } + stenographer.Reset() + return stenographer +} + +func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + return stenographer.calls +} + +func (stenographer *FakeStenographer) Reset() { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = make([]FakeStenographerCall, 0) +} + +func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + results := make([]FakeStenographerCall, 0) + for _, call := range stenographer.calls { + if call.Method == method { + results = append(results, call) + } + } + + return results +} + +func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { + stenographer.lock.Lock() + defer stenographer.lock.Unlock() + + stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) +} + +func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) +} + +func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) +} + +func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSpecWillRun", spec) +} + +func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + 
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) +} +func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { + stenographer.registerCall("AnnounceCapturedOutput", output) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { + stenographer.registerCall("AnnounceSuccesfulSpec", spec) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct) +} + +func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + stenographer.registerCall("AnnouncePendingSpec", spec, noisy) +} + +func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) +} + +func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + stenographer.registerCall("SummarizeFailures", summaries) +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go new file mode 100644 index 0000000000..601c74d66e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -0,0 +1,572 @@ +/* +The stenographer is used by Ginkgo's reporters to generate output. + +Move along, nothing to see here. 
+*/ + +package stenographer + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/onsi/ginkgo/types" +) + +const defaultStyle = "\x1b[0m" +const boldStyle = "\x1b[1m" +const redColor = "\x1b[91m" +const greenColor = "\x1b[32m" +const yellowColor = "\x1b[33m" +const cyanColor = "\x1b[36m" +const grayColor = "\x1b[90m" +const lightGrayColor = "\x1b[37m" + +type cursorStateType int + +const ( + cursorStateTop cursorStateType = iota + cursorStateStreaming + cursorStateMidBlock + cursorStateEndBlock +) + +type Stenographer interface { + AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) + AnnounceAggregatedParallelRun(nodes int, succinct bool) + AnnounceParallelRun(node int, nodes int, succinct bool) + AnnounceTotalNumberOfSpecs(total int, succinct bool) + AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) + AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) + + AnnounceSpecWillRun(spec *types.SpecSummary) + AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) + + AnnounceCapturedOutput(output string) + + AnnounceSuccesfulSpec(spec *types.SpecSummary) + AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) + AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) + + AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) + AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) + + AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) + AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) + + SummarizeFailures(summaries []*types.SpecSummary) +} + +func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { + denoter := "•" + if runtime.GOOS == "windows" { + denoter = "+" + } + return &consoleStenographer{ + color: color, + denoter: denoter, + cursorState: cursorStateTop, + enableFlakes: enableFlakes, + w: writer, + } +} + +type consoleStenographer struct { + color bool + denoter string + cursorState cursorStateType + enableFlakes bool + w io.Writer +} + +var alternatingColors = []string{defaultStyle, grayColor} + +func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { + if succinct { + s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) + return + } + s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") + s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) + if randomizingAll { + s.print(0, " - Will randomize all specs") + } + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + if succinct { + s.print(0, "- node #%d ", node) + return + } + s.println(0, + "Parallel test node %s/%s.", + s.colorize(boldStyle, "%d", node), + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { + if succinct { + s.print(0, "- %d nodes ", nodes) + return + } + s.println(0, + "Running in parallel across %s nodes", + s.colorize(boldStyle, "%d", nodes), + ) + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { + if succinct { + s.print(0, "- %d/%d specs ", specsToRun, total) + s.stream() + return + } + 
s.println(0, + "Will run %s of %s specs", + s.colorize(boldStyle, "%d", specsToRun), + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + if succinct { + s.print(0, "- %d specs ", total) + s.stream() + return + } + s.println(0, + "Will run %s specs", + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + +func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { + if succinct && summary.SuiteSucceeded { + s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) + return + } + s.printNewLine() + color := greenColor + if !summary.SuiteSucceeded { + color = redColor + } + s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) + + status := "" + if summary.SuiteSucceeded { + status = s.colorize(boldStyle+greenColor, "SUCCESS!") + } else { + status = s.colorize(boldStyle+redColor, "FAIL!") + } + + flakes := "" + if s.enableFlakes { + flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs) + } + + s.print(0, + "%s -- %s | %s | %s | %s\n", + status, + s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), + s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, + s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), + s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), + ) +} + +func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { + s.startBlock() + for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { + s.print(0, s.colorize(alternatingColors[i%2], text)+" ") + } + + indentation := 0 + if len(spec.ComponentTexts) > 2 { + indentation = 1 + s.printNewLine() + } + index := len(spec.ComponentTexts) - 1 + s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) + s.printNewLine() + s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) + s.printNewLine() + s.midBlock() +} + +func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) +} + +func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { + s.startBlock() + var message string + switch summary.State { + case types.SpecStateFailed: + message = "Failure" + case types.SpecStatePanicked: + message = "Panic" + case types.SpecStateTimedOut: + message = "Timeout" + } + + s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) + + s.printNewLine() + s.printFailure(indentation, summary.State, summary.Failure, fullTrace) + + s.endBlock() +} + +func (s *consoleStenographer) AnnounceCapturedOutput(output string) { + if output == "" { + return + } + + s.startBlock() + s.println(0, output) + s.midBlock() +} + +func (s *consoleStenographer) 
AnnounceSuccesfulSpec(spec *types.SpecSummary) { + s.print(0, s.colorize(greenColor, s.denoter)) + s.stream() +} + +func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), + "", + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) { + s.printBlockWithMessage( + s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), + s.measurementReport(spec, succinct), + spec, + succinct, + ) +} + +func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { + if noisy { + s.printBlockWithMessage( + s.colorize(yellowColor, "P [PENDING]"), + "", + spec, + false, + ) + } else { + s.print(0, s.colorize(yellowColor, "P")) + s.stream() + } +} + +func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { + // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. + if succinct || spec.Failure == (types.SpecFailure{}) { + s.print(0, s.colorize(cyanColor, "S")) + s.stream() + } else { + s.startBlock() + s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printSkip(indentation, spec.Failure) + s.endBlock() + } +} + +func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) +} + +func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { + failingSpecs := []*types.SpecSummary{} + + for _, summary := range summaries { + if summary.HasFailureState() { + failingSpecs = append(failingSpecs, summary) + } + } + + if len(failingSpecs) == 0 { + return + } + + s.printNewLine() + s.printNewLine() + plural := "s" + if len(failingSpecs) == 1 { + plural = "" + } + s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) + for _, summary := range failingSpecs { + s.printNewLine() + if summary.HasFailureState() { + if summary.TimedOut() { + s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) + } else if summary.Panicked() { + s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) + } else if summary.Failed() { + s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) + } + s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) + s.printNewLine() + s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) + } + } +} + +func (s *consoleStenographer) startBlock() { + if s.cursorState == cursorStateStreaming { + s.printNewLine() + s.printDelimiter() + } else if s.cursorState == cursorStateMidBlock { + s.printNewLine() + } +} + +func (s *consoleStenographer) midBlock() { + s.cursorState = cursorStateMidBlock +} + +func (s *consoleStenographer) endBlock() { + s.printDelimiter() + s.cursorState = cursorStateEndBlock +} + +func (s *consoleStenographer) stream() { + s.cursorState = cursorStateStreaming +} + +func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { + s.startBlock() + s.println(0, header) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) + + if message != "" { + s.printNewLine() + s.println(indentation, message) + } + + s.endBlock() +} + +func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { + s.startBlock() + s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) + + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) + + s.printNewLine() + s.printFailure(indentation, spec.State, spec.Failure, fullTrace) + s.endBlock() +} + +func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + return " in Suite Setup (BeforeSuite)" + case types.SpecComponentTypeAfterSuite: + return " in Suite Teardown (AfterSuite)" + case types.SpecComponentTypeBeforeEach: + return " in Spec Setup (BeforeEach)" + case types.SpecComponentTypeJustBeforeEach: + return " in Spec Setup (JustBeforeEach)" + case types.SpecComponentTypeAfterEach: + return " in Spec Teardown (AfterEach)" + } + + return "" +} + +func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { + s.println(indentation, s.colorize(cyanColor, spec.Message)) + s.printNewLine() + s.println(indentation, spec.Location.String()) +} + +func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { + if state == types.SpecStatePanicked { + s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) + s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) + s.println(indentation, failure.Location.String()) + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } else { + s.println(indentation, s.colorize(redColor, failure.Message)) + s.printNewLine() + s.println(indentation, failure.Location.String()) + if fullTrace { + s.printNewLine() + s.println(indentation, s.colorize(redColor, "Full Stack Trace")) + s.println(indentation, failure.Location.FullStackTrace) + } + } +} + +func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + startIndex := 1 + indentation := 0 + + if len(componentTexts) == 1 { + startIndex = 0 + } + + for i := startIndex; i < len(componentTexts); i++ { + if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { + color := redColor + if state == types.SpecStateSkipped { + color = cyanColor + } + blockType := "" + switch failedComponentType { + case types.SpecComponentTypeBeforeSuite: + blockType = "BeforeSuite" + case types.SpecComponentTypeAfterSuite: + blockType = "AfterSuite" + case types.SpecComponentTypeBeforeEach: + blockType = "BeforeEach" + case types.SpecComponentTypeJustBeforeEach: + blockType = "JustBeforeEach" + case types.SpecComponentTypeAfterEach: + blockType = "AfterEach" + case types.SpecComponentTypeIt: + blockType = "It" + case types.SpecComponentTypeMeasure: + blockType = "Measurement" + } + if succinct { + s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) + } else { + s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } else { + if succinct { + s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) + } else { + s.println(indentation, componentTexts[i]) + s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) + } + } + indentation++ + } + + return indentation +} + +func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) + + if succinct { + if len(componentTexts) > 0 { + s.printNewLine() + s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) + } + s.printNewLine() + indentation = 1 + } else { + indentation-- + } + + return indentation +} + +func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { + orderedKeys := make([]string, len(measurements)) + for key, measurement := range measurements { + orderedKeys[measurement.Order] = key + } + return orderedKeys +} + +func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { + if len(spec.Measurements) == 0 { + return "Found no measurements" + } + + message := []string{} + orderedKeys := s.orderedMeasurementKeys(spec.Measurements) + + if succinct { + message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + measurement.SmallestLabel, + s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), + measurement.Units, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, measurement.PrecisionFmt(), 
measurement.Largest), + measurement.Units, + )) + } + } else { + message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) + for _, key := range orderedKeys { + measurement := spec.Measurements[key] + info := "" + if measurement.Info != nil { + message = append(message, fmt.Sprintf("%v", measurement.Info)) + } + + message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s", + s.colorize(boldStyle, "%s", measurement.Name), + info, + measurement.SmallestLabel, + s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), + measurement.Units, + measurement.LargestLabel, + s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest), + measurement.Units, + measurement.AverageLabel, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), + measurement.Units, + s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), + measurement.Units, + )) + } + } + + return strings.Join(message, "\n") +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md new file mode 100644 index 0000000000..e84226a735 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md @@ -0,0 +1,43 @@ +# go-colorable + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! 
+ +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go new file mode 100644 index 0000000000..52d6653b34 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go @@ -0,0 +1,24 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +func NewColorableStdout() io.Writer { + return os.Stdout +} + +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go new file mode 100644 index 0000000000..1088009230 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go @@ -0,0 +1,783 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + 
return file + } +} + +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 
0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + 
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr 
|= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := 
float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go new file mode 100644 index 0000000000..fb976dbd8b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" +) + +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/gobuffalo/flect/LICENSE.txt b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE similarity index 93% rename from vendor/github.com/gobuffalo/flect/LICENSE.txt rename to vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE index 123ddc0d80..65dc692b6b 100644 --- a/vendor/github.com/gobuffalo/flect/LICENSE.txt +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE @@ -1,5 +1,6 @@ -The MIT License (MIT) -Copyright (c) 2018 Mark Bates +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md new file mode 100644 index 0000000000..74845de4a2 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md @@ -0,0 +1,37 @@ +# go-isatty + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go new file mode 100644 index 0000000000..17d4f90ebc --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go new file mode 100644 index 0000000000..83c588773c --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go @@ -0,0 +1,9 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on on appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go new file mode 100644 index 0000000000..98ffe86a4b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go new file mode 100644 index 0000000000..9d24bac1db --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go new file mode 100644 index 0000000000..1f0c6bf53d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go new file mode 100644 index 0000000000..83c398b16d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go new file mode 100644 index 0000000000..36ee2a6005 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go @@ -0,0 +1,93 @@ +/* + +TeamCity Reporter for Ginkgo + +Makes use of TeamCity's support for Service Messages +http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests +*/ + +package reporters + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +const ( + messageId = "##teamcity" +) + +type TeamCityReporter struct { + writer io.Writer + testSuiteName string +} + +func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { + return &TeamCityReporter{ + writer: writer, + } +} + +func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { + reporter.testSuiteName = escape(summary.SuiteDescription) + fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName) +} + +func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("BeforeSuite", setupSummary) +} + +func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { + reporter.handleSetupSummary("AfterSuite", setupSummary) +} + +func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { + if setupSummary.State != types.SpecStatePassed { + testName := escape(name) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) + 
message := escape(setupSummary.Failure.ComponentCodeLocation.String()) + details := escape(setupSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) + } +} + +func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName) +} + +func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { + testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) + + if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { + message := escape(specSummary.Failure.ComponentCodeLocation.String()) + details := escape(specSummary.Failure.Message) + fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details) + } + if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { + fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName) + } + + durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 + fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds) +} + +func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName) +} + +func escape(output string) string { + output = strings.Replace(output, "|", "||", -1) + output = strings.Replace(output, "'", "|'", -1) + output = strings.Replace(output, "\n", "|n", -1) + output = strings.Replace(output, "\r", "|r", -1) + output = strings.Replace(output, "[", "|[", -1) + output = strings.Replace(output, "]", "|]", -1) + return output +} diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go new file mode 100644 index 0000000000..935a89e136 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/code_location.go @@ -0,0 +1,15 @@ +package types + +import ( + "fmt" +) + +type CodeLocation struct { + FileName string + LineNumber int + FullStackTrace string +} + +func (codeLocation CodeLocation) String() string { + return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) +} diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go new file mode 100644 index 0000000000..fdd6ed5bdf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/synchronization.go @@ -0,0 +1,30 @@ +package types + +import ( + "encoding/json" +) + +type RemoteBeforeSuiteState int + +const ( + RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota + + RemoteBeforeSuiteStatePending + RemoteBeforeSuiteStatePassed + RemoteBeforeSuiteStateFailed + RemoteBeforeSuiteStateDisappeared +) + +type RemoteBeforeSuiteData struct { + Data []byte + State RemoteBeforeSuiteState +} + +func (r RemoteBeforeSuiteData) ToJSON() []byte { + data, _ := json.Marshal(r) + return data +} + +type RemoteAfterSuiteData struct { + CanRun bool +} diff --git a/vendor/github.com/onsi/ginkgo/types/types.go 
b/vendor/github.com/onsi/ginkgo/types/types.go new file mode 100644 index 0000000000..0e89521be9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/types/types.go @@ -0,0 +1,174 @@ +package types + +import ( + "strconv" + "time" +) + +const GINKGO_FOCUS_EXIT_CODE = 197 + +/* +SuiteSummary represents the a summary of the test suite and is passed to both +Reporter.SpecSuiteWillBegin +Reporter.SpecSuiteDidEnd + +this is unfortunate as these two methods should receive different objects. When running in parallel +each node does not deterministically know how many specs it will end up running. + +Unfortunately making such a change would break backward compatibility. + +Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields +with -1. +*/ +type SuiteSummary struct { + SuiteDescription string + SuiteSucceeded bool + SuiteID string + + NumberOfSpecsBeforeParallelization int + NumberOfTotalSpecs int + NumberOfSpecsThatWillBeRun int + NumberOfPendingSpecs int + NumberOfSkippedSpecs int + NumberOfPassedSpecs int + NumberOfFailedSpecs int + // Flaked specs are those that failed initially, but then passed on a + // subsequent try. + NumberOfFlakedSpecs int + RunTime time.Duration +} + +type SpecSummary struct { + ComponentTexts []string + ComponentCodeLocations []CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + IsMeasurement bool + NumberOfSamples int + Measurements map[string]*SpecMeasurement + + CapturedOutput string + SuiteID string +} + +func (s SpecSummary) HasFailureState() bool { + return s.State.IsFailure() +} + +func (s SpecSummary) TimedOut() bool { + return s.State == SpecStateTimedOut +} + +func (s SpecSummary) Panicked() bool { + return s.State == SpecStatePanicked +} + +func (s SpecSummary) Failed() bool { + return s.State == SpecStateFailed +} + +func (s SpecSummary) Passed() bool { + return s.State == SpecStatePassed +} + +func (s SpecSummary) Skipped() bool { + return s.State == SpecStateSkipped +} + +func (s SpecSummary) Pending() bool { + return s.State == SpecStatePending +} + +type SetupSummary struct { + ComponentType SpecComponentType + CodeLocation CodeLocation + + State SpecState + RunTime time.Duration + Failure SpecFailure + + CapturedOutput string + SuiteID string +} + +type SpecFailure struct { + Message string + Location CodeLocation + ForwardedPanic string + + ComponentIndex int + ComponentType SpecComponentType + ComponentCodeLocation CodeLocation +} + +type SpecMeasurement struct { + Name string + Info interface{} + Order int + + Results []float64 + + Smallest float64 + Largest float64 + Average float64 + StdDeviation float64 + + SmallestLabel string + LargestLabel string + AverageLabel string + Units string + Precision int +} + +func (s SpecMeasurement) PrecisionFmt() string { + if s.Precision == 0 { + return "%f" + } + + str := strconv.Itoa(s.Precision) + + return "%." 
+ str + "f" +} + +type SpecState uint + +const ( + SpecStateInvalid SpecState = iota + + SpecStatePending + SpecStateSkipped + SpecStatePassed + SpecStateFailed + SpecStatePanicked + SpecStateTimedOut +) + +func (state SpecState) IsFailure() bool { + return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed +} + +type SpecComponentType uint + +const ( + SpecComponentTypeInvalid SpecComponentType = iota + + SpecComponentTypeContainer + SpecComponentTypeBeforeSuite + SpecComponentTypeAfterSuite + SpecComponentTypeBeforeEach + SpecComponentTypeJustBeforeEach + SpecComponentTypeJustAfterEach + SpecComponentTypeAfterEach + SpecComponentTypeIt + SpecComponentTypeMeasure +) + +type FlagType uint + +const ( + FlagTypeNone FlagType = iota + FlagTypeFocused + FlagTypePending +) diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml index 3deb4a1243..d8156a60ba 100644 --- a/vendor/github.com/pborman/uuid/.travis.yml +++ b/vendor/github.com/pborman/uuid/.travis.yml @@ -1,9 +1,8 @@ language: go go: - - "1.9" - - "1.10" - - "1.11" + - 1.4.3 + - 1.5.3 - tip script: diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md index 810ad40dc9..b0396b2747 100644 --- a/vendor/github.com/pborman/uuid/README.md +++ b/vendor/github.com/pborman/uuid/README.md @@ -3,8 +3,6 @@ This project was automatically exported from code.google.com/p/go-uuid # uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. -This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). - ###### Install `go get github.com/pborman/uuid` diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go index 727d761674..d8bd013e68 100644 --- a/vendor/github.com/pborman/uuid/doc.go +++ b/vendor/github.com/pborman/uuid/doc.go @@ -4,10 +4,5 @@ // The uuid package generates and inspects UUIDs. // -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// This package is a partial wrapper around the github.com/google/uuid package. -// This package represents a UUID as []byte while github.com/google/uuid -// represents a UUID as [16]byte. +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. 
package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod deleted file mode 100644 index 099fc7de0d..0000000000 --- a/vendor/github.com/pborman/uuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pborman/uuid - -require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum deleted file mode 100644 index db2574a9c3..0000000000 --- a/vendor/github.com/pborman/uuid/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go index 35b89352ad..6621dd54be 100644 --- a/vendor/github.com/pborman/uuid/marshal.go +++ b/vendor/github.com/pborman/uuid/marshal.go @@ -7,8 +7,6 @@ package uuid import ( "errors" "fmt" - - guuid "github.com/google/uuid" ) // MarshalText implements encoding.TextMarshaler. @@ -62,11 +60,11 @@ func (u Array) MarshalText() ([]byte, error) { // UnmarshalText implements encoding.TextUnmarshaler. func (u *Array) UnmarshalText(data []byte) error { - id, err := guuid.ParseBytes(data) - if err != nil { - return err + id := Parse(string(data)) + if id == nil { + return errors.New("invalid UUID") } - *u = Array(id) + *u = id.Array() return nil } diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go index e524e0101b..42d60da8f1 100644 --- a/vendor/github.com/pborman/uuid/node.go +++ b/vendor/github.com/pborman/uuid/node.go @@ -5,14 +5,24 @@ package uuid import ( - guuid "github.com/google/uuid" + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs ) // NodeInterface returns the name of the interface from which the NodeID was // derived. The interface "user" is returned if the NodeID was set by // SetNodeID. func NodeInterface() string { - return guuid.NodeInterface() + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname } // SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. @@ -22,20 +32,77 @@ func NodeInterface() string { // // SetNodeInterface never fails when name is "". func SetNodeInterface(name string) bool { - return guuid.SetNodeInterface(name) + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false } // NodeID returns a slice of a copy of the current Node ID, setting the Node ID // if not already set. 
func NodeID() []byte { - return guuid.NodeID() + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == nil { + setNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid } // SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes // of id are used. If id is less than 6 bytes then false is returned and the // Node ID is not set. func SetNodeID(id []byte) bool { - return guuid.SetNodeID(id) + defer nodeMu.Unlock() + nodeMu.Lock() + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true } // NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go index 929c3847e2..d015bfd132 100644 --- a/vendor/github.com/pborman/uuid/sql.go +++ b/vendor/github.com/pborman/uuid/sql.go @@ -40,9 +40,7 @@ func (uuid *UUID) Scan(src interface{}) error { // assumes a simple slice of bytes if 16 bytes // otherwise attempts to parse if len(b) == 16 { - parsed := make([]byte, 16) - copy(parsed, b) - *uuid = UUID(parsed) + *uuid = UUID(b) } else { u := Parse(string(b)) diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go index 5c0960d872..eedf242194 100644 --- a/vendor/github.com/pborman/uuid/time.go +++ b/vendor/github.com/pborman/uuid/time.go @@ -6,18 +6,65 @@ package uuid import ( "encoding/binary" - - guuid "github.com/google/uuid" + "sync" + "time" ) // A Time represents a time as the number of 100's of nanoseconds since 15 Oct // 1582. -type Time = guuid.Time +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clock_seq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} // GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and // clock sequence as well as adjusting the clock sequence as needed. An error // is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { return guuid.GetTime() } +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clock_seq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clock_seq, nil +} // ClockSequence returns the current clock sequence, generating one if not // already set. The clock sequence is only used for Version 1 UUIDs. 
@@ -27,11 +74,39 @@ func GetTime() (Time, uint16, error) { return guuid.GetTime() } // clock sequence is generated the first time a clock sequence is requested by // ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated // for -func ClockSequence() int { return guuid.ClockSequence() } +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clock_seq == 0 { + setClockSequence(-1) + } + return int(clock_seq & 0x3fff) +} // SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to // -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clock_seq + clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clock_seq { + lasttime = 0 + } +} // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in // uuid. It returns false if uuid is not valid. The time is only well defined diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go index 255b5e2485..fc8e052c7a 100644 --- a/vendor/github.com/pborman/uuid/util.go +++ b/vendor/github.com/pborman/uuid/util.go @@ -4,6 +4,17 @@ package uuid +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + // xvalues returns the value of a byte as a hexadecimal digit or 255. var xvalues = [256]byte{ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go index 3370004207..7c643cf0a3 100644 --- a/vendor/github.com/pborman/uuid/uuid.go +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -8,9 +8,9 @@ import ( "bytes" "crypto/rand" "encoding/hex" + "fmt" "io" - - guuid "github.com/google/uuid" + "strings" ) // Array is a pass-by-value UUID that can be used as an effecient key in a map. @@ -24,7 +24,7 @@ func (uuid Array) UUID() UUID { // String returns the string representation of uuid, // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. func (uuid Array) String() string { - return guuid.UUID(uuid).String() + return uuid.UUID().String() } // A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC @@ -32,18 +32,18 @@ func (uuid Array) String() string { type UUID []byte // A Version represents a UUIDs version. -type Version = guuid.Version +type Version byte // A Variant represents a UUIDs variant. -type Variant = guuid.Variant +type Variant byte // Constants returned by Variant. const ( - Invalid = guuid.Invalid // Invalid UUID - RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 - Reserved = guuid.Reserved // Reserved, NCS backward compatibility. - Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future = guuid.Future // Reserved for future definition. + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. 
) var rander = rand.Reader // random function @@ -54,23 +54,35 @@ func New() string { return NewRandom().String() } -// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for -// the formats parsed. +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. func Parse(s string) UUID { - gu, err := guuid.Parse(s) - if err == nil { - return gu[:] + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil } - return nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - gu, err := guuid.ParseBytes(b) - if err == nil { - return gu[:], nil + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil } - return nil, err + var uuid [16]byte + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid[:] } // Equal returns true if uuid1 and uuid2 are equal. @@ -151,6 +163,29 @@ func (uuid UUID) Version() (Version, bool) { return Version(uuid[6] >> 4), true } +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + // SetRand sets the random number generator to r, which implements io.Reader. // If r.Read returns an error when the package requests random data then // a panic will be issued. @@ -158,5 +193,9 @@ func (uuid UUID) Version() (Version, bool) { // Calling SetRand with nil sets the random number generator to the default // generator. func SetRand(r io.Reader) { - guuid.SetRand(r) + if r == nil { + rander = rand.Reader + return + } + rander = r } diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go index 7af948da79..0127eacfab 100644 --- a/vendor/github.com/pborman/uuid/version1.go +++ b/vendor/github.com/pborman/uuid/version1.go @@ -5,7 +5,7 @@ package uuid import ( - guuid "github.com/google/uuid" + "encoding/binary" ) // NewUUID returns a Version 1 UUID based on the current NodeID and clock @@ -15,9 +15,27 @@ import ( // SetClockSequence then it will be set automatically. If GetTime fails to // return the current NewUUID returns nil. 
func NewUUID() UUID { - gu, err := guuid.NewUUID() - if err == nil { - return UUID(gu[:]) + if nodeID == nil { + SetNodeInterface("") } - return nil + + now, seq, err := GetTime() + if err != nil { + return nil + } + + uuid := make([]byte, 16) + + time_low := uint32(now & 0xffffffff) + time_mid := uint16((now >> 32) & 0xffff) + time_hi := uint16((now >> 48) & 0x0fff) + time_hi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], time_low) + binary.BigEndian.PutUint16(uuid[4:], time_mid) + binary.BigEndian.PutUint16(uuid[6:], time_hi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID) + + return uuid } diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go index b459d46d13..b3d4a368dd 100644 --- a/vendor/github.com/pborman/uuid/version4.go +++ b/vendor/github.com/pborman/uuid/version4.go @@ -4,14 +4,12 @@ package uuid -import guuid "github.com/google/uuid" - // Random returns a Random (Version 4) UUID or panics. // // The strength of the UUIDs is based on the strength of the crypto/rand // package. // -// A note about uniqueness derived from the UUID Wikipedia entry: +// A note about uniqueness derived from from the UUID Wikipedia entry: // // Randomly generated UUIDs have 122 random bits. One's annual risk of being // hit by a meteorite is estimated to be one chance in 17 billion, that @@ -19,8 +17,9 @@ import guuid "github.com/google/uuid" // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() UUID { - if gu, err := guuid.NewRandom(); err == nil { - return UUID(gu[:]) - } - return nil + uuid := make([]byte, 16) + randomBits([]byte(uuid)) + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid } diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md deleted file mode 100644 index 3474739edc..0000000000 --- a/vendor/github.com/peterbourgon/diskv/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# What is diskv? - -Diskv (disk-vee) is a simple, persistent key-value store written in the Go -language. It starts with an incredibly simple API for storing arbitrary data on -a filesystem by key, and builds several layers of performance-enhancing -abstraction on top. The end result is a conceptually simple, but highly -performant, disk-backed storage system. - -[![Build Status][1]][2] - -[1]: https://drone.io/github.com/peterbourgon/diskv/status.png -[2]: https://drone.io/github.com/peterbourgon/diskv/latest - - -# Installing - -Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. -Then, - -```bash -$ go get github.com/peterbourgon/diskv -``` - -[3]: http://golang.org -[4]: http://golang.org/doc/install/source -[5]: http://golang.org/doc/install - - -# Usage - -```go -package main - -import ( - "fmt" - "github.com/peterbourgon/diskv" -) - -func main() { - // Simplest transform function: put all the data files into the base dir. - flatTransform := func(s string) []string { return []string{} } - - // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. - d := diskv.New(diskv.Options{ - BasePath: "my-data-dir", - Transform: flatTransform, - CacheSizeMax: 1024 * 1024, - }) - - // Write three bytes to the key "alpha". - key := "alpha" - d.Write(key, []byte{'1', '2', '3'}) - - // Read the value back out of the store. 
- value, _ := d.Read(key) - fmt.Printf("%v\n", value) - - // Erase the key+value from the store (and the disk). - d.Erase(key) -} -``` - -More complex examples can be found in the "examples" subdirectory. - - -# Theory - -## Basic idea - -At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). -The data is written to a single file on disk, with the same name as the key. -The key determines where that file will be stored, via a user-provided -`TransformFunc`, which takes a key and returns a slice (`[]string`) -corresponding to a path list where the key file will be stored. The simplest -TransformFunc, - -```go -func SimpleTransform (key string) []string { - return []string{} -} -``` - -will place all keys in the same, base directory. The design is inspired by -[Redis diskstore][6]; a TransformFunc which emulates the default diskstore -behavior is available in the content-addressable-storage example. - -[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 - -**Note** that your TransformFunc should ensure that one valid key doesn't -transform to a subset of another valid key. That is, it shouldn't be possible -to construct valid keys that resolve to directory names. As a concrete example, -if your TransformFunc splits on every 3 characters, then - -```go -d.Write("abcabc", val) // OK: written to /abc/abc/abcabc -d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory -``` - -This will be addressed in an upcoming version of diskv. - -Probably the most important design principle behind diskv is that your data is -always flatly available on the disk. diskv will never do anything that would -prevent you from accessing, copying, backing up, or otherwise interacting with -your data via common UNIX commandline tools. - -## Adding a cache - -An in-memory caching layer is provided by combining the BasicStore -functionality with a simple map structure, and keeping it up-to-date as -appropriate. Since the map structure in Go is not threadsafe, it's combined -with a RWMutex to provide safe concurrent access. - -## Adding order - -diskv is a key-value store and therefore inherently unordered. An ordering -system can be injected into the store by passing something which satisfies the -diskv.Index interface. (A default implementation, using Google's -[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a -user-provided Less function) index of the keys, which can be queried. - -[7]: https://github.com/google/btree - -## Adding compression - -Something which implements the diskv.Compression interface may be passed -during store creation, so that all Writes and Reads are filtered through -a compression/decompression pipeline. Several default implementations, -using stdlib compression algorithms, are provided. Note that data is cached -compressed; the cost of decompression is borne with each Read. - -## Streaming - -diskv also now provides ReadStream and WriteStream methods, to allow very large -data to be handled efficiently. - - -# Future plans - - * Needs plenty of robust testing: huge datasets, etc... 
- * More thorough benchmarking - * Your suggestions for use-cases I haven't thought of diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go deleted file mode 100644 index 5192b02733..0000000000 --- a/vendor/github.com/peterbourgon/diskv/compression.go +++ /dev/null @@ -1,64 +0,0 @@ -package diskv - -import ( - "compress/flate" - "compress/gzip" - "compress/zlib" - "io" -) - -// Compression is an interface that Diskv uses to implement compression of -// data. Writer takes a destination io.Writer and returns a WriteCloser that -// compresses all data written through it. Reader takes a source io.Reader and -// returns a ReadCloser that decompresses all data read through it. You may -// define these methods on your own type, or use one of the NewCompression -// helpers. -type Compression interface { - Writer(dst io.Writer) (io.WriteCloser, error) - Reader(src io.Reader) (io.ReadCloser, error) -} - -// NewGzipCompression returns a Gzip-based Compression. -func NewGzipCompression() Compression { - return NewGzipCompressionLevel(flate.DefaultCompression) -} - -// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. -func NewGzipCompressionLevel(level int) Compression { - return &genericCompression{ - wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, - rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, - } -} - -// NewZlibCompression returns a Zlib-based Compression. -func NewZlibCompression() Compression { - return NewZlibCompressionLevel(flate.DefaultCompression) -} - -// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. -func NewZlibCompressionLevel(level int) Compression { - return NewZlibCompressionLevelDict(level, nil) -} - -// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given -// level, based on the given dictionary. -func NewZlibCompressionLevelDict(level int, dict []byte) Compression { - return &genericCompression{ - func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, - func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, - } -} - -type genericCompression struct { - wf func(w io.Writer) (io.WriteCloser, error) - rf func(r io.Reader) (io.ReadCloser, error) -} - -func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { - return g.wf(dst) -} - -func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { - return g.rf(src) -} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go deleted file mode 100644 index 524dc0a6e3..0000000000 --- a/vendor/github.com/peterbourgon/diskv/diskv.go +++ /dev/null @@ -1,624 +0,0 @@ -// Diskv (disk-vee) is a simple, persistent, key-value store. -// It stores all data flatly on the filesystem. 
- -package diskv - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "syscall" -) - -const ( - defaultBasePath = "diskv" - defaultFilePerm os.FileMode = 0666 - defaultPathPerm os.FileMode = 0777 -) - -var ( - defaultTransform = func(s string) []string { return []string{} } - errCanceled = errors.New("canceled") - errEmptyKey = errors.New("empty key") - errBadKey = errors.New("bad key") - errImportDirectory = errors.New("can't import a directory") -) - -// TransformFunction transforms a key into a slice of strings, with each -// element in the slice representing a directory in the file path where the -// key's entry will eventually be stored. -// -// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], -// the final location of the data file will be /ab/cde/f/abcdef -type TransformFunction func(s string) []string - -// Options define a set of properties that dictate Diskv behavior. -// All values are optional. -type Options struct { - BasePath string - Transform TransformFunction - CacheSizeMax uint64 // bytes - PathPerm os.FileMode - FilePerm os.FileMode - // If TempDir is set, it will enable filesystem atomic writes by - // writing temporary files to that location before being moved - // to BasePath. - // Note that TempDir MUST be on the same device/partition as - // BasePath. - TempDir string - - Index Index - IndexLess LessFunction - - Compression Compression -} - -// Diskv implements the Diskv interface. You shouldn't construct Diskv -// structures directly; instead, use the New constructor. -type Diskv struct { - Options - mu sync.RWMutex - cache map[string][]byte - cacheSize uint64 -} - -// New returns an initialized Diskv structure, ready to use. -// If the path identified by baseDir already contains data, -// it will be accessible, but not yet cached. -func New(o Options) *Diskv { - if o.BasePath == "" { - o.BasePath = defaultBasePath - } - if o.Transform == nil { - o.Transform = defaultTransform - } - if o.PathPerm == 0 { - o.PathPerm = defaultPathPerm - } - if o.FilePerm == 0 { - o.FilePerm = defaultFilePerm - } - - d := &Diskv{ - Options: o, - cache: map[string][]byte{}, - cacheSize: 0, - } - - if d.Index != nil && d.IndexLess != nil { - d.Index.Initialize(d.IndexLess, d.Keys(nil)) - } - - return d -} - -// Write synchronously writes the key-value pair to disk, making it immediately -// available for reads. Write relies on the filesystem to perform an eventual -// sync to physical media. If you need stronger guarantees, see WriteStream. -func (d *Diskv) Write(key string, val []byte) error { - return d.WriteStream(key, bytes.NewBuffer(val), false) -} - -// WriteStream writes the data represented by the io.Reader to the disk, under -// the provided key. If sync is true, WriteStream performs an explicit sync on -// the file as soon as it's written. -// -// bytes.Buffer provides io.Reader semantics for basic data types. -func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { - if len(key) <= 0 { - return errEmptyKey - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.writeStreamWithLock(key, r, sync) -} - -// createKeyFileWithLock either creates the key file directly, or -// creates a temporary file in TempDir if it is set. 
-func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) { - if d.TempDir != "" { - if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { - return nil, fmt.Errorf("temp mkdir: %s", err) - } - f, err := ioutil.TempFile(d.TempDir, "") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - - if err := f.Chmod(d.FilePerm); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return nil, fmt.Errorf("chmod: %s", err) - } - return f, nil - } - - mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists - f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) - if err != nil { - return nil, fmt.Errorf("open file: %s", err) - } - return f, nil -} - -// writeStream does no input validation checking. -func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { - if err := d.ensurePathWithLock(key); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - f, err := d.createKeyFileWithLock(key) - if err != nil { - return fmt.Errorf("create key file: %s", err) - } - - wc := io.WriteCloser(&nopWriteCloser{f}) - if d.Compression != nil { - wc, err = d.Compression.Writer(f) - if err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression writer: %s", err) - } - } - - if _, err := io.Copy(wc, r); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("i/o copy: %s", err) - } - - if err := wc.Close(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression close: %s", err) - } - - if sync { - if err := f.Sync(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("file sync: %s", err) - } - } - - if err := f.Close(); err != nil { - return fmt.Errorf("file close: %s", err) - } - - if f.Name() != d.completeFilename(key) { - if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil { - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("rename: %s", err) - } - } - - if d.Index != nil { - d.Index.Insert(key) - } - - d.bustCacheWithLock(key) // cache only on read - - return nil -} - -// Import imports the source file into diskv under the destination key. If the -// destination key already exists, it's overwritten. If move is true, the -// source file is removed after a successful import. -func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { - if dstKey == "" { - return errEmptyKey - } - - if fi, err := os.Stat(srcFilename); err != nil { - return err - } else if fi.IsDir() { - return errImportDirectory - } - - d.mu.Lock() - defer d.mu.Unlock() - - if err := d.ensurePathWithLock(dstKey); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - if move { - if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil { - d.bustCacheWithLock(dstKey) - return nil - } else if err != syscall.EXDEV { - // If it failed due to being on a different device, fall back to copying - return err - } - } - - f, err := os.Open(srcFilename) - if err != nil { - return err - } - defer f.Close() - err = d.writeStreamWithLock(dstKey, f, false) - if err == nil && move { - err = os.Remove(srcFilename) - } - return err -} - -// Read reads the key and returns the value. 
-// If the key is available in the cache, Read won't touch the disk. -// If the key is not in the cache, Read will have the side-effect of -// lazily caching the value. -func (d *Diskv) Read(key string) ([]byte, error) { - rc, err := d.ReadStream(key, false) - if err != nil { - return []byte{}, err - } - defer rc.Close() - return ioutil.ReadAll(rc) -} - -// ReadStream reads the key and returns the value (data) as an io.ReadCloser. -// If the value is cached from a previous read, and direct is false, -// ReadStream will use the cached value. Otherwise, it will return a handle to -// the file on disk, and cache the data on read. -// -// If direct is true, ReadStream will lazily delete any cached value for the -// key, and return a direct handle to the file on disk. -// -// If compression is enabled, ReadStream taps into the io.Reader stream prior -// to decompression, and caches the compressed data. -func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - if val, ok := d.cache[key]; ok { - if !direct { - buf := bytes.NewBuffer(val) - if d.Compression != nil { - return d.Compression.Reader(buf) - } - return ioutil.NopCloser(buf), nil - } - - go func() { - d.mu.Lock() - defer d.mu.Unlock() - d.uncacheWithLock(key, uint64(len(val))) - }() - } - - return d.readWithRLock(key) -} - -// read ignores the cache, and returns an io.ReadCloser representing the -// decompressed data for the given key, streamed from the disk. Clients should -// acquire a read lock on the Diskv and check the cache themselves before -// calling read. -func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { - filename := d.completeFilename(key) - - fi, err := os.Stat(filename) - if err != nil { - return nil, err - } - if fi.IsDir() { - return nil, os.ErrNotExist - } - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - - var r io.Reader - if d.CacheSizeMax > 0 { - r = newSiphon(f, d, key) - } else { - r = &closingReader{f} - } - - var rc = io.ReadCloser(ioutil.NopCloser(r)) - if d.Compression != nil { - rc, err = d.Compression.Reader(r) - if err != nil { - return nil, err - } - } - - return rc, nil -} - -// closingReader provides a Reader that automatically closes the -// embedded ReadCloser when it reaches EOF -type closingReader struct { - rc io.ReadCloser -} - -func (cr closingReader) Read(p []byte) (int, error) { - n, err := cr.rc.Read(p) - if err == io.EOF { - if closeErr := cr.rc.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - } - return n, err -} - -// siphon is like a TeeReader: it copies all data read through it to an -// internal buffer, and moves that buffer to the cache at EOF. -type siphon struct { - f *os.File - d *Diskv - key string - buf *bytes.Buffer -} - -// newSiphon constructs a siphoning reader that represents the passed file. -// When a successful series of reads ends in an EOF, the siphon will write -// the buffered data to Diskv's cache under the given key. -func newSiphon(f *os.File, d *Diskv, key string) io.Reader { - return &siphon{ - f: f, - d: d, - key: key, - buf: &bytes.Buffer{}, - } -} - -// Read implements the io.Reader interface for siphon. 
-func (s *siphon) Read(p []byte) (int, error) { - n, err := s.f.Read(p) - - if err == nil { - return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed - } - - if err == io.EOF { - s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail - if closeErr := s.f.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - return n, err - } - - return n, err -} - -// Erase synchronously erases the given key from the disk and the cache. -func (d *Diskv) Erase(key string) error { - d.mu.Lock() - defer d.mu.Unlock() - - d.bustCacheWithLock(key) - - // erase from index - if d.Index != nil { - d.Index.Delete(key) - } - - // erase from disk - filename := d.completeFilename(key) - if s, err := os.Stat(filename); err == nil { - if s.IsDir() { - return errBadKey - } - if err = os.Remove(filename); err != nil { - return err - } - } else { - // Return err as-is so caller can do os.IsNotExist(err). - return err - } - - // clean up and return - d.pruneDirsWithLock(key) - return nil -} - -// EraseAll will delete all of the data from the store, both in the cache and on -// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- -// diskv-related data. Care should be taken to always specify a diskv base -// directory that is exclusively for diskv data. -func (d *Diskv) EraseAll() error { - d.mu.Lock() - defer d.mu.Unlock() - d.cache = make(map[string][]byte) - d.cacheSize = 0 - if d.TempDir != "" { - os.RemoveAll(d.TempDir) // errors ignored - } - return os.RemoveAll(d.BasePath) -} - -// Has returns true if the given key exists. -func (d *Diskv) Has(key string) bool { - d.mu.Lock() - defer d.mu.Unlock() - - if _, ok := d.cache[key]; ok { - return true - } - - filename := d.completeFilename(key) - s, err := os.Stat(filename) - if err != nil { - return false - } - if s.IsDir() { - return false - } - - return true -} - -// Keys returns a channel that will yield every key accessible by the store, -// in undefined order. If a cancel channel is provided, closing it will -// terminate and close the keys channel. -func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { - return d.KeysPrefix("", cancel) -} - -// KeysPrefix returns a channel that will yield every key accessible by the -// store with the given prefix, in undefined order. If a cancel channel is -// provided, closing it will terminate and close the keys channel. If the -// provided prefix is the empty string, all keys will be yielded. -func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { - var prepath string - if prefix == "" { - prepath = d.BasePath - } else { - prepath = d.pathFor(prefix) - } - c := make(chan string) - go func() { - filepath.Walk(prepath, walker(c, prefix, cancel)) - close(c) - }() - return c -} - -// walker returns a function which satisfies the filepath.WalkFunc interface. -// It sends every non-directory file entry down the channel c. -func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { - return nil // "pass" - } - - select { - case c <- info.Name(): - case <-cancel: - return errCanceled - } - - return nil - } -} - -// pathFor returns the absolute path for location on the filesystem where the -// data for the given key will be stored. 
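Keys and KeysPrefix above stream key names from a filesystem walk until the cancel channel is closed. A short sketch of stopping the walk early; the helper name is illustrative:

```go
package main

import "github.com/peterbourgon/diskv"

// firstN collects up to n keys from the store, then stops the walk.
func firstN(d *diskv.Diskv, n int) []string {
	cancel := make(chan struct{})
	defer close(cancel) // unblocks the walker goroutine once we stop reading

	keys := make([]string, 0, n)
	for key := range d.Keys(cancel) {
		keys = append(keys, key)
		if len(keys) == n {
			break
		}
	}
	return keys
}
```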
-func (d *Diskv) pathFor(key string) string { - return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) -} - -// ensurePathWithLock is a helper function that generates all necessary -// directories on the filesystem for the given key. -func (d *Diskv) ensurePathWithLock(key string) error { - return os.MkdirAll(d.pathFor(key), d.PathPerm) -} - -// completeFilename returns the absolute path to the file for the given key. -func (d *Diskv) completeFilename(key string) string { - return filepath.Join(d.pathFor(key), key) -} - -// cacheWithLock attempts to cache the given key-value pair in the store's -// cache. It can fail if the value is larger than the cache's maximum size. -func (d *Diskv) cacheWithLock(key string, val []byte) error { - valueSize := uint64(len(val)) - if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { - return fmt.Errorf("%s; not caching", err) - } - - // be very strict about memory guarantees - if (d.cacheSize + valueSize) > d.CacheSizeMax { - panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) - } - - d.cache[key] = val - d.cacheSize += valueSize - return nil -} - -// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. -func (d *Diskv) cacheWithoutLock(key string, val []byte) error { - d.mu.Lock() - defer d.mu.Unlock() - return d.cacheWithLock(key, val) -} - -func (d *Diskv) bustCacheWithLock(key string) { - if val, ok := d.cache[key]; ok { - d.uncacheWithLock(key, uint64(len(val))) - } -} - -func (d *Diskv) uncacheWithLock(key string, sz uint64) { - d.cacheSize -= sz - delete(d.cache, key) -} - -// pruneDirsWithLock deletes empty directories in the path walk leading to the -// key k. Typically this function is called after an Erase is made. -func (d *Diskv) pruneDirsWithLock(key string) error { - pathlist := d.Transform(key) - for i := range pathlist { - dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) - - // thanks to Steven Blenkinsop for this snippet - switch fi, err := os.Stat(dir); true { - case err != nil: - return err - case !fi.IsDir(): - panic(fmt.Sprintf("corrupt dirstate at %s", dir)) - } - - nlinks, err := filepath.Glob(filepath.Join(dir, "*")) - if err != nil { - return err - } else if len(nlinks) > 0 { - return nil // has subdirs -- do not prune - } - if err = os.Remove(dir); err != nil { - return err - } - } - - return nil -} - -// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order -// until the cache has at least valueSize bytes available. -func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { - if valueSize > d.CacheSizeMax { - return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) - } - - safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } - - for key, val := range d.cache { - if safe() { - break - } - - d.uncacheWithLock(key, uint64(len(val))) - } - - if !safe() { - panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) - } - - return nil -} - -// nopWriteCloser wraps an io.Writer and provides a no-op Close method to -// satisfy the io.WriteCloser interface. 
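pathFor and completeFilename above combine the caller-supplied Transform with BasePath, so the transform alone determines the on-disk layout. A sketch of a two-level fan-out transform; the path is hypothetical:

```go
package main

import "github.com/peterbourgon/diskv"

func main() {
	// "abcdef" ends up at <BasePath>/ab/cd/abcdef.
	twoLevel := func(key string) []string {
		if len(key) < 4 {
			return []string{}
		}
		return []string{key[:2], key[2:4]}
	}

	d := diskv.New(diskv.Options{
		BasePath:     "/tmp/diskv-example", // hypothetical
		Transform:    twoLevel,
		CacheSizeMax: 1 << 20,
	})
	_ = d.Write("abcdef", []byte("v")) // creates /tmp/diskv-example/ab/cd/abcdef
}
```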
-type nopWriteCloser struct { - io.Writer -} - -func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } -func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go deleted file mode 100644 index 96fee5152b..0000000000 --- a/vendor/github.com/peterbourgon/diskv/index.go +++ /dev/null @@ -1,115 +0,0 @@ -package diskv - -import ( - "sync" - - "github.com/google/btree" -) - -// Index is a generic interface for things that can -// provide an ordered list of keys. -type Index interface { - Initialize(less LessFunction, keys <-chan string) - Insert(key string) - Delete(key string) - Keys(from string, n int) []string -} - -// LessFunction is used to initialize an Index of keys in a specific order. -type LessFunction func(string, string) bool - -// btreeString is a custom data type that satisfies the BTree Less interface, -// making the strings it wraps sortable by the BTree package. -type btreeString struct { - s string - l LessFunction -} - -// Less satisfies the BTree.Less interface using the btreeString's LessFunction. -func (s btreeString) Less(i btree.Item) bool { - return s.l(s.s, i.(btreeString).s) -} - -// BTreeIndex is an implementation of the Index interface using google/btree. -type BTreeIndex struct { - sync.RWMutex - LessFunction - *btree.BTree -} - -// Initialize populates the BTree tree with data from the keys channel, -// according to the passed less function. It's destructive to the BTreeIndex. -func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { - i.Lock() - defer i.Unlock() - i.LessFunction = less - i.BTree = rebuild(less, keys) -} - -// Insert inserts the given key (only) into the BTree tree. -func (i *BTreeIndex) Insert(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) -} - -// Delete removes the given key (only) from the BTree tree. -func (i *BTreeIndex) Delete(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) -} - -// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, -// Keys will return the first n keys. If the passed 'from' key is non-empty, the -// first key in the returned slice will be the key that immediately follows the -// passed key, in key order. -func (i *BTreeIndex) Keys(from string, n int) []string { - i.RLock() - defer i.RUnlock() - - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - - if i.BTree.Len() <= 0 { - return []string{} - } - - btreeFrom := btreeString{s: from, l: i.LessFunction} - skipFirst := true - if len(from) <= 0 || !i.BTree.Has(btreeFrom) { - // no such key, so fabricate an always-smallest item - btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} - skipFirst = false - } - - keys := []string{} - iterator := func(i btree.Item) bool { - keys = append(keys, i.(btreeString).s) - return len(keys) < n - } - i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) - - if skipFirst && len(keys) > 0 { - keys = keys[1:] - } - - return keys -} - -// rebuildIndex does the work of regenerating the index -// with the given keys. 
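The deleted index.go provides ordered key listing on top of google/btree. A sketch of wiring a BTreeIndex into a store and paging through keys in order, assuming the Index and IndexLess option fields documented in diskv's README:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "/tmp/diskv-index-example", // hypothetical
		Transform:    func(key string) []string { return []string{} },
		CacheSizeMax: 1 << 20,
		Index:        &diskv.BTreeIndex{},
		IndexLess:    func(a, b string) bool { return a < b },
	})

	for _, k := range []string{"b", "a", "c"} {
		_ = d.Write(k, []byte(k))
	}

	// First page of up to 10 keys in IndexLess order, from the beginning.
	fmt.Println(d.Index.Keys("", 10)) // [a b c]
}
```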
-func rebuild(less LessFunction, keys <-chan string) *btree.BTree { - tree := btree.New(2) - for key := range keys { - tree.ReplaceOrInsert(btreeString{s: key, l: less}) - } - return tree -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index c0d70b2faf..1e839650d4 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -79,7 +79,7 @@ type Collector interface { // of the Describe method. If a Collector sometimes collects no metrics at all // (for example vectors like CounterVec, GaugeVec, etc., which only collect // metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collecter (cf. the Register +// it might even get registered as an unchecked Collector (cf. the Register // method of the Registerer interface). Hence, only use this shortcut // implementation of Describe if you are certain to fulfill the contract. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 5d9525defc..1e0d578ee7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -122,13 +122,13 @@ // the Collect method. The Describe method has to return separate Desc // instances, representative of the “throw-away” metrics to be created later. // NewDesc comes in handy to create those Desc instances. Alternatively, you -// could return no Desc at all, which will marke the Collector “unchecked”. No -// checks are porformed at registration time, but metric consistency will still +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still // be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situatios where the exact +// sometimes necessary. The typical use case is a situation where the exact // metrics to be returned by a Collector cannot be predicted at registration // time, but the implementer has sufficient knowledge of the whole system to // guarantee metric consistency. 
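The corrected doc comment above describes "unchecked" Collectors: when Describe sends no Desc, registration-time checks are skipped and consistency is enforced only at scrape time. A minimal sketch; the metric name and label are made up:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// kindCollector doesn't know its label values until scrape time, so it
// describes nothing and is registered as an unchecked Collector.
type kindCollector struct{}

func (kindCollector) Describe(chan<- *prometheus.Desc) {} // no Desc => unchecked

func (kindCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("example_items_total", "Items seen, by kind.", []string{"kind"}, nil),
		prometheus.CounterValue,
		42, "widget",
	)
}

func main() {
	prometheus.MustRegister(kindCollector{})
}
```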
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index f88da707bc..20ee1afb5d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } } - // Finally we know the final length of h.upperBounds and can make counts - // for both states: + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts: h.counts[0].buckets = make([]uint64, len(h.upperBounds)) h.counts[1].buckets = make([]uint64, len(h.upperBounds)) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go index 9f0875bfc8..0fa339aec0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -34,7 +34,6 @@ import ( const ( contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 67b56d37cf..5de5bbc685 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -38,7 +38,6 @@ type delegator interface { type responseWriterDelegator struct { http.ResponseWriter - handler, method string status int written int64 wroteHeader bool diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 668eb6b3c9..b137c88307 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -47,7 +47,6 @@ import ( const ( contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index b5e70b93fa..f2fb67aeeb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -680,7 +680,7 @@ func processMetric( // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the -// Gather calles are all returned in a flattened MultiError. Duplicate and +// Gather calls are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. 
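The Gatherers doc fix above concerns merging several registries into one scrape output, with duplicates skipped and reported in a flattened MultiError. A short sketch of exposing a merged gatherer over HTTP:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	custom := prometheus.NewRegistry()
	// ... register collectors on custom ...

	// Gather from the default registry and the custom one in order;
	// duplicate or inconsistent metrics are skipped (first occurrence wins).
	merged := prometheus.Gatherers{prometheus.DefaultGatherer, custom}

	http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}
```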
// diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 2980614dff..e4c87145cb 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -16,8 +16,10 @@ package prometheus import ( "fmt" "math" + "runtime" "sort" "sync" + "sync/atomic" "time" "github.com/beorn7/perks/quantile" @@ -151,7 +153,7 @@ type SummaryOpts struct { BufCap uint32 } -// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. To avoid using Merge, we are currently adding @@ -214,6 +216,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if len(opts.Objectives) == 0 { + // Use the lock-free implementation of a Summary without objectives. + s := &noObjectivesSummary{ + desc: desc, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}}, + } + s.init(s) // Init self-collection. + return s + } + s := &summary{ desc: desc, @@ -382,6 +395,142 @@ func (s *summary) swapBufs(now time.Time) { } } +type summaryCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 +} + +type noObjectivesSummary struct { + // countAndHotIdx is a complicated one. For lock-free yet atomic + // observations, we need to save the total count of observations again, + // combined with the index of the currently-hot counts struct, so that + // we can perform the operation on both values atomically. The least + // significant bit defines the hot counts struct. The remaining 63 bits + // represent the total count of observations. This happens under the + // assumption that the 63bit count will never overflow. Rationale: An + // observations takes about 30ns. Let's assume it could happen in + // 10ns. Overflowing the counter will then take at least (2^63)*10ns, + // which is about 3000 years. + // + // This has to be first in the struct for 64bit alignment. See + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. + + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*summaryCounts + hotIdx int // Index of currently-hot counts. Only used within Write. + + labelPairs []*dto.LabelPair +} + +func (s *noObjectivesSummary) Desc() *Desc { + return s.desc +} + +func (s *noObjectivesSummary) Observe(v float64) { + // We increment s.countAndHotIdx by 2 so that the counter in the upper + // 63 bits gets incremented by 1. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. 
+ n := atomic.AddUint64(&s.countAndHotIdx, 2) + hotCounts := s.counts[n%2] + + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (s *noObjectivesSummary) Write(out *dto.Metric) error { + var ( + sum = &dto.Summary{} + hotCounts, coldCounts *summaryCounts + count uint64 + ) + + // For simplicity, we mutex the rest of this method. It is not in the + // hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it. + s.writeMtx.Lock() + defer s.writeMtx.Unlock() + + // This is a bit arcane, which is why the following spells out this if + // clause in English: + // + // If the currently-hot counts struct is #0, we atomically increment + // s.countAndHotIdx by 1 so that from now on Observe will use the counts + // struct #1. Furthermore, the atomic increment gives us the new value, + // which, in its most significant 63 bits, tells us the count of + // observations done so far up to and including currently ongoing + // observations still using the counts struct just changed from hot to + // cold. To have a normal uint64 for the count, we bitshift by 1 and + // save the result in count. We also set s.hotIdx to 1 for the next + // Write call, and we will refer to counts #1 as hotCounts and to counts + // #0 as coldCounts. + // + // If the currently-hot counts struct is #1, we do the corresponding + // things the other way round. We have to _decrement_ s.countAndHotIdx + // (which is a bit arcane in itself, as we have to express -1 with an + // unsigned int...). + if s.hotIdx == 0 { + count = atomic.AddUint64(&s.countAndHotIdx, 1) >> 1 + s.hotIdx = 1 + hotCounts = s.counts[1] + coldCounts = s.counts[0] + } else { + count = atomic.AddUint64(&s.countAndHotIdx, ^uint64(0)) >> 1 // Decrement. + s.hotIdx = 0 + hotCounts = s.counts[0] + coldCounts = s.counts[1] + } + + // Now we have to wait for the now-declared-cold counts to actually cool + // down, i.e. wait for all observations still using it to finish. That's + // the case once the count in the cold counts struct is the same as the + // one atomically retrieved from the upper 63bits of s.countAndHotIdx. + for { + if count == atomic.LoadUint64(&coldCounts.count) { + break + } + runtime.Gosched() // Let observations get work done. + } + + sum.SampleCount = proto.Uint64(count) + sum.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))) + + out.Summary = sum + out.Label = s.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. 
+ atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + type quantSort []*dto.Quantile func (s quantSort) Len() int { diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 16655d417c..8e473d0fe9 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -436,11 +436,11 @@ func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (in func writeFloat(w enhancedWriter, f float64) (int, error) { switch { case f == 1: - return w.WriteString("1.0") + return 1, w.WriteByte('1') case f == 0: - return w.WriteString("0.0") + return 1, w.WriteByte('0') case f == -1: - return w.WriteString("-1.0") + return w.WriteString("-1") case math.IsNaN(f): return w.WriteString("NaN") case math.IsInf(f, +1): @@ -450,12 +450,6 @@ func writeFloat(w enhancedWriter, f float64) (int, error) { default: bp := numBufPool.Get().(*[]byte) *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - // Add a .0 if used fixed point and there is no decimal - // point already. This is for future proofing with OpenMetrics, - // where floats always contain either an exponent or decimal. - if !bytes.ContainsAny(*bp, "e.") { - *bp = append(*bp, '.', '0') - } written, err := w.Write(*bp) numBufPool.Put(bp) return written, err diff --git a/vendor/github.com/rogpeppe/go-internal/LICENSE b/vendor/github.com/rogpeppe/go-internal/LICENSE deleted file mode 100644 index 49ea0f9288..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2018 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
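The summary.go hunk above selects the new lock-free implementation whenever SummaryOpts.Objectives is empty, in which case only the _count and _sum series are exported. A minimal sketch of opting into that path in this vendored version; the metric name is illustrative:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// No Objectives: no quantiles are calculated, and observations go
	// through the lock-free noObjectivesSummary added in the hunk above.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency in seconds.",
	})
	prometheus.MustRegister(latency)

	latency.Observe(0.042)
}
```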
diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go b/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go deleted file mode 100644 index c94b3848a0..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: Figure out what gopkg.in should do. - -package modfile - -import "strings" - -// ParseGopkgIn splits gopkg.in import paths into their constituent parts -func ParseGopkgIn(path string) (root, repo, major, subdir string, ok bool) { - if !strings.HasPrefix(path, "gopkg.in/") { - return - } - f := strings.Split(path, "/") - if len(f) >= 2 { - if elem, v, ok := dotV(f[1]); ok { - root = strings.Join(f[:2], "/") - repo = "github.com/go-" + elem + "/" + elem - major = v - subdir = strings.Join(f[2:], "/") - return root, repo, major, subdir, true - } - } - if len(f) >= 3 { - if elem, v, ok := dotV(f[2]); ok { - root = strings.Join(f[:3], "/") - repo = "github.com/" + f[1] + "/" + elem - major = v - subdir = strings.Join(f[3:], "/") - return root, repo, major, subdir, true - } - } - return -} - -func dotV(name string) (elem, v string, ok bool) { - i := len(name) - 1 - for i >= 0 && '0' <= name[i] && name[i] <= '9' { - i-- - } - if i <= 2 || i+1 >= len(name) || name[i-1] != '.' || name[i] != 'v' || name[i+1] == '0' && len(name) != i+2 { - return "", "", false - } - return name[:i-1], name[i:], true -} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/print.go b/vendor/github.com/rogpeppe/go-internal/modfile/print.go deleted file mode 100644 index 7b1dd8f953..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/modfile/print.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package modfile implements parsing and formatting for -// go.mod files. -package modfile - -import ( - "bytes" - "fmt" - "strings" -) - -func Format(f *FileSyntax) []byte { - pr := &printer{} - pr.file(f) - return pr.Bytes() -} - -// A printer collects the state during printing of a file or expression. -type printer struct { - bytes.Buffer // output buffer - comment []Comment // pending end-of-line comments - margin int // left margin (indent), a number of tabs -} - -// printf prints to the buffer. -func (p *printer) printf(format string, args ...interface{}) { - fmt.Fprintf(p, format, args...) -} - -// indent returns the position on the current line, in bytes, 0-indexed. -func (p *printer) indent() int { - b := p.Bytes() - n := 0 - for n < len(b) && b[len(b)-1-n] != '\n' { - n++ - } - return n -} - -// newline ends the current line, flushing end-of-line comments. -func (p *printer) newline() { - if len(p.comment) > 0 { - p.printf(" ") - for i, com := range p.comment { - if i > 0 { - p.trim() - p.printf("\n") - for i := 0; i < p.margin; i++ { - p.printf("\t") - } - } - p.printf("%s", strings.TrimSpace(com.Token)) - } - p.comment = p.comment[:0] - } - - p.trim() - p.printf("\n") - for i := 0; i < p.margin; i++ { - p.printf("\t") - } -} - -// trim removes trailing spaces and tabs from the current line. -func (p *printer) trim() { - // Remove trailing spaces and tabs from line we're about to end. 
- b := p.Bytes() - n := len(b) - for n > 0 && (b[n-1] == '\t' || b[n-1] == ' ') { - n-- - } - p.Truncate(n) -} - -// file formats the given file into the print buffer. -func (p *printer) file(f *FileSyntax) { - for _, com := range f.Before { - p.printf("%s", strings.TrimSpace(com.Token)) - p.newline() - } - - for i, stmt := range f.Stmt { - switch x := stmt.(type) { - case *CommentBlock: - // comments already handled - p.expr(x) - - default: - p.expr(x) - p.newline() - } - - for _, com := range stmt.Comment().After { - p.printf("%s", strings.TrimSpace(com.Token)) - p.newline() - } - - if i+1 < len(f.Stmt) { - p.newline() - } - } -} - -func (p *printer) expr(x Expr) { - // Emit line-comments preceding this expression. - if before := x.Comment().Before; len(before) > 0 { - // Want to print a line comment. - // Line comments must be at the current margin. - p.trim() - if p.indent() > 0 { - // There's other text on the line. Start a new line. - p.printf("\n") - } - // Re-indent to margin. - for i := 0; i < p.margin; i++ { - p.printf("\t") - } - for _, com := range before { - p.printf("%s", strings.TrimSpace(com.Token)) - p.newline() - } - } - - switch x := x.(type) { - default: - panic(fmt.Errorf("printer: unexpected type %T", x)) - - case *CommentBlock: - // done - - case *LParen: - p.printf("(") - case *RParen: - p.printf(")") - - case *Line: - sep := "" - for _, tok := range x.Token { - p.printf("%s%s", sep, tok) - sep = " " - } - - case *LineBlock: - for _, tok := range x.Token { - p.printf("%s ", tok) - } - p.expr(&x.LParen) - p.margin++ - for _, l := range x.Line { - p.newline() - p.expr(l) - } - p.margin-- - p.newline() - p.expr(&x.RParen) - } - - // Queue end-of-line comments for printing when we - // reach the end of the line. - p.comment = append(p.comment, x.Comment().Suffix...) -} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/read.go b/vendor/github.com/rogpeppe/go-internal/modfile/read.go deleted file mode 100644 index 1d81ff1ab7..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/modfile/read.go +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Module file parser. -// This is a simplified copy of Google's buildifier parser. - -package modfile - -import ( - "bytes" - "fmt" - "os" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -// A Position describes the position between two bytes of input. -type Position struct { - Line int // line in input (starting at 1) - LineRune int // rune in line (starting at 1) - Byte int // byte in input (starting at 0) -} - -// add returns the position at the end of s, assuming it starts at p. -func (p Position) add(s string) Position { - p.Byte += len(s) - if n := strings.Count(s, "\n"); n > 0 { - p.Line += n - s = s[strings.LastIndex(s, "\n")+1:] - p.LineRune = 1 - } - p.LineRune += utf8.RuneCountInString(s) - return p -} - -// An Expr represents an input element. -type Expr interface { - // Span returns the start and end position of the expression, - // excluding leading or trailing comments. - Span() (start, end Position) - - // Comment returns the comments attached to the expression. - // This method would normally be named 'Comments' but that - // would interfere with embedding a type of the same name. - Comment() *Comments -} - -// A Comment represents a single // comment. 
-type Comment struct { - Start Position - Token string // without trailing newline - Suffix bool // an end of line (not whole line) comment -} - -// Comments collects the comments associated with an expression. -type Comments struct { - Before []Comment // whole-line comments before this expression - Suffix []Comment // end-of-line comments after this expression - - // For top-level expressions only, After lists whole-line - // comments following the expression. - After []Comment -} - -// Comment returns the receiver. This isn't useful by itself, but -// a Comments struct is embedded into all the expression -// implementation types, and this gives each of those a Comment -// method to satisfy the Expr interface. -func (c *Comments) Comment() *Comments { - return c -} - -// A FileSyntax represents an entire go.mod file. -type FileSyntax struct { - Name string // file path - Comments - Stmt []Expr -} - -func (x *FileSyntax) Span() (start, end Position) { - if len(x.Stmt) == 0 { - return - } - start, _ = x.Stmt[0].Span() - _, end = x.Stmt[len(x.Stmt)-1].Span() - return start, end -} - -func (x *FileSyntax) addLine(hint Expr, tokens ...string) *Line { - if hint == nil { - // If no hint given, add to the last statement of the given type. - Loop: - for i := len(x.Stmt) - 1; i >= 0; i-- { - stmt := x.Stmt[i] - switch stmt := stmt.(type) { - case *Line: - if stmt.Token != nil && stmt.Token[0] == tokens[0] { - hint = stmt - break Loop - } - case *LineBlock: - if stmt.Token[0] == tokens[0] { - hint = stmt - break Loop - } - } - } - } - - if hint != nil { - for i, stmt := range x.Stmt { - switch stmt := stmt.(type) { - case *Line: - if stmt == hint { - // Convert line to line block. - stmt.InBlock = true - block := &LineBlock{Token: stmt.Token[:1], Line: []*Line{stmt}} - stmt.Token = stmt.Token[1:] - x.Stmt[i] = block - new := &Line{Token: tokens[1:], InBlock: true} - block.Line = append(block.Line, new) - return new - } - case *LineBlock: - if stmt == hint { - new := &Line{Token: tokens[1:], InBlock: true} - stmt.Line = append(stmt.Line, new) - return new - } - for j, line := range stmt.Line { - if line == hint { - // Add new line after hint. - stmt.Line = append(stmt.Line, nil) - copy(stmt.Line[j+2:], stmt.Line[j+1:]) - new := &Line{Token: tokens[1:], InBlock: true} - stmt.Line[j+1] = new - return new - } - } - } - } - } - - new := &Line{Token: tokens} - x.Stmt = append(x.Stmt, new) - return new -} - -func (x *FileSyntax) updateLine(line *Line, tokens ...string) { - if line.InBlock { - tokens = tokens[1:] - } - line.Token = tokens -} - -func (x *FileSyntax) removeLine(line *Line) { - line.Token = nil -} - -// Cleanup cleans up the file syntax x after any edit operations. -// To avoid quadratic behavior, removeLine marks the line as dead -// by setting line.Token = nil but does not remove it from the slice -// in which it appears. After edits have all been indicated, -// calling Cleanup cleans out the dead lines. -func (x *FileSyntax) Cleanup() { - w := 0 - for _, stmt := range x.Stmt { - switch stmt := stmt.(type) { - case *Line: - if stmt.Token == nil { - continue - } - case *LineBlock: - ww := 0 - for _, line := range stmt.Line { - if line.Token != nil { - stmt.Line[ww] = line - ww++ - } - } - if ww == 0 { - continue - } - if ww == 1 { - // Collapse block into single line. 
- line := &Line{ - Comments: Comments{ - Before: commentsAdd(stmt.Before, stmt.Line[0].Before), - Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), - After: commentsAdd(stmt.Line[0].After, stmt.After), - }, - Token: stringsAdd(stmt.Token, stmt.Line[0].Token), - } - x.Stmt[w] = line - w++ - continue - } - stmt.Line = stmt.Line[:ww] - } - x.Stmt[w] = stmt - w++ - } - x.Stmt = x.Stmt[:w] -} - -func commentsAdd(x, y []Comment) []Comment { - return append(x[:len(x):len(x)], y...) -} - -func stringsAdd(x, y []string) []string { - return append(x[:len(x):len(x)], y...) -} - -// A CommentBlock represents a top-level block of comments separate -// from any rule. -type CommentBlock struct { - Comments - Start Position -} - -func (x *CommentBlock) Span() (start, end Position) { - return x.Start, x.Start -} - -// A Line is a single line of tokens. -type Line struct { - Comments - Start Position - Token []string - InBlock bool - End Position -} - -func (x *Line) Span() (start, end Position) { - return x.Start, x.End -} - -// A LineBlock is a factored block of lines, like -// -// require ( -// "x" -// "y" -// ) -// -type LineBlock struct { - Comments - Start Position - LParen LParen - Token []string - Line []*Line - RParen RParen -} - -func (x *LineBlock) Span() (start, end Position) { - return x.Start, x.RParen.Pos.add(")") -} - -// An LParen represents the beginning of a parenthesized line block. -// It is a place to store suffix comments. -type LParen struct { - Comments - Pos Position -} - -func (x *LParen) Span() (start, end Position) { - return x.Pos, x.Pos.add(")") -} - -// An RParen represents the end of a parenthesized line block. -// It is a place to store whole-line (before) comments. -type RParen struct { - Comments - Pos Position -} - -func (x *RParen) Span() (start, end Position) { - return x.Pos, x.Pos.add(")") -} - -// An input represents a single input file being parsed. -type input struct { - // Lexing state. - filename string // name of input file, for errors - complete []byte // entire input - remaining []byte // remaining input - token []byte // token being scanned - lastToken string // most recently returned token, for error messages - pos Position // current input position - comments []Comment // accumulated comments - endRule int // position of end of current rule - - // Parser state. - file *FileSyntax // returned top-level syntax tree - parseError error // error encountered during parsing - - // Comment assignment state. - pre []Expr // all expressions, in preorder traversal - post []Expr // all expressions, in postorder traversal -} - -func newInput(filename string, data []byte) *input { - return &input{ - filename: filename, - complete: data, - remaining: data, - pos: Position{Line: 1, LineRune: 1, Byte: 0}, - } -} - -// parse parses the input file. -func parse(file string, data []byte) (f *FileSyntax, err error) { - in := newInput(file, data) - // The parser panics for both routine errors like syntax errors - // and for programmer bugs like array index errors. - // Turn both into error returns. Catching bug panics is - // especially important when processing many files. - defer func() { - if e := recover(); e != nil { - if e == in.parseError { - err = in.parseError - } else { - err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e) - } - } - }() - - // Invoke the parser. - in.parseFile() - if in.parseError != nil { - return nil, in.parseError - } - in.file.Name = in.filename - - // Assign comments to nearby syntax. 
- in.assignComments() - - return in.file, nil -} - -// Error is called to report an error. -// The reason s is often "syntax error". -// Error does not return: it panics. -func (in *input) Error(s string) { - if s == "syntax error" && in.lastToken != "" { - s += " near " + in.lastToken - } - in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s) - panic(in.parseError) -} - -// eof reports whether the input has reached end of file. -func (in *input) eof() bool { - return len(in.remaining) == 0 -} - -// peekRune returns the next rune in the input without consuming it. -func (in *input) peekRune() int { - if len(in.remaining) == 0 { - return 0 - } - r, _ := utf8.DecodeRune(in.remaining) - return int(r) -} - -// peekPrefix reports whether the remaining input begins with the given prefix. -func (in *input) peekPrefix(prefix string) bool { - // This is like bytes.HasPrefix(in.remaining, []byte(prefix)) - // but without the allocation of the []byte copy of prefix. - for i := 0; i < len(prefix); i++ { - if i >= len(in.remaining) || in.remaining[i] != prefix[i] { - return false - } - } - return true -} - -// readRune consumes and returns the next rune in the input. -func (in *input) readRune() int { - if len(in.remaining) == 0 { - in.Error("internal lexer error: readRune at EOF") - } - r, size := utf8.DecodeRune(in.remaining) - in.remaining = in.remaining[size:] - if r == '\n' { - in.pos.Line++ - in.pos.LineRune = 1 - } else { - in.pos.LineRune++ - } - in.pos.Byte += size - return int(r) -} - -type symType struct { - pos Position - endPos Position - text string -} - -// startToken marks the beginning of the next input token. -// It must be followed by a call to endToken, once the token has -// been consumed using readRune. -func (in *input) startToken(sym *symType) { - in.token = in.remaining - sym.text = "" - sym.pos = in.pos -} - -// endToken marks the end of an input token. -// It records the actual token string in sym.text if the caller -// has not done that already. -func (in *input) endToken(sym *symType) { - if sym.text == "" { - tok := string(in.token[:len(in.token)-len(in.remaining)]) - sym.text = tok - in.lastToken = sym.text - } - sym.endPos = in.pos -} - -// lex is called from the parser to obtain the next input token. -// It returns the token value (either a rune like '+' or a symbolic token _FOR) -// and sets val to the data associated with the token. -// For all our input tokens, the associated data is -// val.Pos (the position where the token begins) -// and val.Token (the input string corresponding to the token). -func (in *input) lex(sym *symType) int { - // Skip past spaces, stopping at non-space or EOF. - countNL := 0 // number of newlines we've skipped past - for !in.eof() { - // Skip over spaces. Count newlines so we can give the parser - // information about where top-level blank lines are, - // for top-level comment assignment. - c := in.peekRune() - if c == ' ' || c == '\t' || c == '\r' { - in.readRune() - continue - } - - // Comment runs to end of line. - if in.peekPrefix("//") { - in.startToken(sym) - - // Is this comment the only thing on its line? - // Find the last \n before this // and see if it's all - // spaces from there to here. - i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n")) - suffix := len(bytes.TrimSpace(in.complete[i+1:in.pos.Byte])) > 0 - in.readRune() - in.readRune() - - // Consume comment. 
- for len(in.remaining) > 0 && in.readRune() != '\n' { - } - in.endToken(sym) - - sym.text = strings.TrimRight(sym.text, "\n") - in.lastToken = "comment" - - // If we are at top level (not in a statement), hand the comment to - // the parser as a _COMMENT token. The grammar is written - // to handle top-level comments itself. - if !suffix { - // Not in a statement. Tell parser about top-level comment. - return _COMMENT - } - - // Otherwise, save comment for later attachment to syntax tree. - if countNL > 1 { - in.comments = append(in.comments, Comment{sym.pos, "", false}) - } - in.comments = append(in.comments, Comment{sym.pos, sym.text, suffix}) - countNL = 1 - return _EOL - } - - if in.peekPrefix("/*") { - in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) - } - - // Found non-space non-comment. - break - } - - // Found the beginning of the next token. - in.startToken(sym) - defer in.endToken(sym) - - // End of file. - if in.eof() { - in.lastToken = "EOF" - return _EOF - } - - // Punctuation tokens. - switch c := in.peekRune(); c { - case '\n': - in.readRune() - return c - - case '(': - in.readRune() - return c - - case ')': - in.readRune() - return c - - case '"', '`': // quoted string - quote := c - in.readRune() - for { - if in.eof() { - in.pos = sym.pos - in.Error("unexpected EOF in string") - } - if in.peekRune() == '\n' { - in.Error("unexpected newline in string") - } - c := in.readRune() - if c == quote { - break - } - if c == '\\' && quote != '`' { - if in.eof() { - in.pos = sym.pos - in.Error("unexpected EOF in string") - } - in.readRune() - } - } - in.endToken(sym) - return _STRING - } - - // Checked all punctuation. Must be identifier token. - if c := in.peekRune(); !isIdent(c) { - in.Error(fmt.Sprintf("unexpected input character %#q", c)) - } - - // Scan over identifier. - for isIdent(in.peekRune()) { - if in.peekPrefix("//") { - break - } - if in.peekPrefix("/*") { - in.Error(fmt.Sprintf("mod files must use // comments (not /* */ comments)")) - } - in.readRune() - } - return _IDENT -} - -// isIdent reports whether c is an identifier rune. -// We treat nearly all runes as identifier runes. -func isIdent(c int) bool { - return c != 0 && !unicode.IsSpace(rune(c)) -} - -// Comment assignment. -// We build two lists of all subexpressions, preorder and postorder. -// The preorder list is ordered by start location, with outer expressions first. -// The postorder list is ordered by end location, with outer expressions last. -// We use the preorder list to assign each whole-line comment to the syntax -// immediately following it, and we use the postorder list to assign each -// end-of-line comment to the syntax immediately preceding it. - -// order walks the expression adding it and its subexpressions to the -// preorder and postorder lists. -func (in *input) order(x Expr) { - if x != nil { - in.pre = append(in.pre, x) - } - switch x := x.(type) { - default: - panic(fmt.Errorf("order: unexpected type %T", x)) - case nil: - // nothing - case *LParen, *RParen: - // nothing - case *CommentBlock: - // nothing - case *Line: - // nothing - case *FileSyntax: - for _, stmt := range x.Stmt { - in.order(stmt) - } - case *LineBlock: - in.order(&x.LParen) - for _, l := range x.Line { - in.order(l) - } - in.order(&x.RParen) - } - if x != nil { - in.post = append(in.post, x) - } -} - -// assignComments attaches comments to nearby syntax. -func (in *input) assignComments() { - const debug = false - - // Generate preorder and postorder lists. 
- in.order(in.file) - - // Split into whole-line comments and suffix comments. - var line, suffix []Comment - for _, com := range in.comments { - if com.Suffix { - suffix = append(suffix, com) - } else { - line = append(line, com) - } - } - - if debug { - for _, c := range line { - fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) - } - } - - // Assign line comments to syntax immediately following. - for _, x := range in.pre { - start, _ := x.Span() - if debug { - fmt.Printf("pre %T :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte) - } - xcom := x.Comment() - for len(line) > 0 && start.Byte >= line[0].Start.Byte { - if debug { - fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", line[0].Token, line[0].Start.Byte) - } - xcom.Before = append(xcom.Before, line[0]) - line = line[1:] - } - } - - // Remaining line comments go at end of file. - in.file.After = append(in.file.After, line...) - - if debug { - for _, c := range suffix { - fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", c.Token, c.Start.Line, c.Start.LineRune, c.Start.Byte) - } - } - - // Assign suffix comments to syntax immediately before. - for i := len(in.post) - 1; i >= 0; i-- { - x := in.post[i] - - start, end := x.Span() - if debug { - fmt.Printf("post %T :%d:%d #%d :%d:%d #%d\n", x, start.Line, start.LineRune, start.Byte, end.Line, end.LineRune, end.Byte) - } - - // Do not assign suffix comments to end of line block or whole file. - // Instead assign them to the last element inside. - switch x.(type) { - case *FileSyntax: - continue - } - - // Do not assign suffix comments to something that starts - // on an earlier line, so that in - // - // x ( y - // z ) // comment - // - // we assign the comment to z and not to x ( ... ). - if start.Line != end.Line { - continue - } - xcom := x.Comment() - for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte { - if debug { - fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", suffix[len(suffix)-1].Token, suffix[len(suffix)-1].Start.Byte) - } - xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1]) - suffix = suffix[:len(suffix)-1] - } - } - - // We assigned suffix comments in reverse. - // If multiple suffix comments were appended to the same - // expression node, they are now in reverse. Fix that. - for _, x := range in.post { - reverseComments(x.Comment().Suffix) - } - - // Remaining suffix comments go at beginning of file. - in.file.Before = append(in.file.Before, suffix...) -} - -// reverseComments reverses the []Comment list. 
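The comment-assignment machinery above is what lets a go.mod survive a parse/print round trip with its comments attached to the right statements. A small sketch using the package's exported Parse and Format; the file contents are made up:

```go
package main

import (
	"fmt"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	src := []byte(`module example.com/m // main module

require example.com/dep v1.2.3 // indirect
`)

	f, err := modfile.Parse("go.mod", src, nil) // nil: no version fixer
	if err != nil {
		panic(err)
	}

	// Comments were assigned to the surrounding syntax, so formatting the
	// tree prints them back out in place.
	out, _ := f.Format()
	fmt.Print(string(out))
}
```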
-func reverseComments(list []Comment) { - for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { - list[i], list[j] = list[j], list[i] - } -} - -func (in *input) parseFile() { - in.file = new(FileSyntax) - var sym symType - var cb *CommentBlock - for { - tok := in.lex(&sym) - switch tok { - case '\n': - if cb != nil { - in.file.Stmt = append(in.file.Stmt, cb) - cb = nil - } - case _COMMENT: - if cb == nil { - cb = &CommentBlock{Start: sym.pos} - } - com := cb.Comment() - com.Before = append(com.Before, Comment{Start: sym.pos, Token: sym.text}) - case _EOF: - if cb != nil { - in.file.Stmt = append(in.file.Stmt, cb) - } - return - default: - in.parseStmt(&sym) - if cb != nil { - in.file.Stmt[len(in.file.Stmt)-1].Comment().Before = cb.Before - cb = nil - } - } - } -} - -func (in *input) parseStmt(sym *symType) { - start := sym.pos - end := sym.endPos - token := []string{sym.text} - for { - tok := in.lex(sym) - switch tok { - case '\n', _EOF, _EOL: - in.file.Stmt = append(in.file.Stmt, &Line{ - Start: start, - Token: token, - End: end, - }) - return - case '(': - in.file.Stmt = append(in.file.Stmt, in.parseLineBlock(start, token, sym)) - return - default: - token = append(token, sym.text) - end = sym.endPos - } - } -} - -func (in *input) parseLineBlock(start Position, token []string, sym *symType) *LineBlock { - x := &LineBlock{ - Start: start, - Token: token, - LParen: LParen{Pos: sym.pos}, - } - var comments []Comment - for { - tok := in.lex(sym) - switch tok { - case _EOL: - // ignore - case '\n': - if len(comments) == 0 && len(x.Line) > 0 || len(comments) > 0 && comments[len(comments)-1].Token != "" { - comments = append(comments, Comment{}) - } - case _COMMENT: - comments = append(comments, Comment{Start: sym.pos, Token: sym.text}) - case _EOF: - in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) - case ')': - x.RParen.Before = comments - x.RParen.Pos = sym.pos - tok = in.lex(sym) - if tok != '\n' && tok != _EOF && tok != _EOL { - in.Error("syntax error (expected newline after closing paren)") - } - return x - default: - l := in.parseLine(sym) - x.Line = append(x.Line, l) - l.Comment().Before = comments - comments = nil - } - } -} - -func (in *input) parseLine(sym *symType) *Line { - start := sym.pos - end := sym.endPos - token := []string{sym.text} - for { - tok := in.lex(sym) - switch tok { - case '\n', _EOF, _EOL: - return &Line{ - Start: start, - Token: token, - End: end, - InBlock: true, - } - default: - token = append(token, sym.text) - end = sym.endPos - } - } -} - -const ( - _EOF = -(1 + iota) - _EOL - _IDENT - _STRING - _COMMENT -) - -var ( - slashSlash = []byte("//") - moduleStr = []byte("module") -) - -// ModulePath returns the module path from the gomod file text. -// If it cannot find a module path, it returns an empty string. -// It is tolerant of unrelated problems in the go.mod file. 
-func ModulePath(mod []byte) string { - for len(mod) > 0 { - line := mod - mod = nil - if i := bytes.IndexByte(line, '\n'); i >= 0 { - line, mod = line[:i], line[i+1:] - } - if i := bytes.Index(line, slashSlash); i >= 0 { - line = line[:i] - } - line = bytes.TrimSpace(line) - if !bytes.HasPrefix(line, moduleStr) { - continue - } - line = line[len(moduleStr):] - n := len(line) - line = bytes.TrimSpace(line) - if len(line) == n || len(line) == 0 { - continue - } - - if line[0] == '"' || line[0] == '`' { - p, err := strconv.Unquote(string(line)) - if err != nil { - return "" // malformed quoted string or multiline module path - } - return p - } - - return string(line) - } - return "" // missing module path -} diff --git a/vendor/github.com/rogpeppe/go-internal/modfile/rule.go b/vendor/github.com/rogpeppe/go-internal/modfile/rule.go deleted file mode 100644 index 24d275f12f..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/modfile/rule.go +++ /dev/null @@ -1,724 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package modfile - -import ( - "bytes" - "errors" - "fmt" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "unicode" - - "github.com/rogpeppe/go-internal/module" - "github.com/rogpeppe/go-internal/semver" -) - -// A File is the parsed, interpreted form of a go.mod file. -type File struct { - Module *Module - Go *Go - Require []*Require - Exclude []*Exclude - Replace []*Replace - - Syntax *FileSyntax -} - -// A Module is the module statement. -type Module struct { - Mod module.Version - Syntax *Line -} - -// A Go is the go statement. -type Go struct { - Version string // "1.23" - Syntax *Line -} - -// A Require is a single require statement. -type Require struct { - Mod module.Version - Indirect bool // has "// indirect" comment - Syntax *Line -} - -// An Exclude is a single exclude statement. -type Exclude struct { - Mod module.Version - Syntax *Line -} - -// A Replace is a single replace statement. -type Replace struct { - Old module.Version - New module.Version - Syntax *Line -} - -func (f *File) AddModuleStmt(path string) error { - if f.Syntax == nil { - f.Syntax = new(FileSyntax) - } - if f.Module == nil { - f.Module = &Module{ - Mod: module.Version{Path: path}, - Syntax: f.Syntax.addLine(nil, "module", AutoQuote(path)), - } - } else { - f.Module.Mod.Path = path - f.Syntax.updateLine(f.Module.Syntax, "module", AutoQuote(path)) - } - return nil -} - -func (f *File) AddComment(text string) { - if f.Syntax == nil { - f.Syntax = new(FileSyntax) - } - f.Syntax.Stmt = append(f.Syntax.Stmt, &CommentBlock{ - Comments: Comments{ - Before: []Comment{ - { - Token: text, - }, - }, - }, - }) -} - -type VersionFixer func(path, version string) (string, error) - -// Parse parses the data, reported in errors as being from file, -// into a File struct. It applies fix, if non-nil, to canonicalize all module versions found. -func Parse(file string, data []byte, fix VersionFixer) (*File, error) { - return parseToFile(file, data, fix, true) -} - -// ParseLax is like Parse but ignores unknown statements. -// It is used when parsing go.mod files other than the main module, -// under the theory that most statement types we add in the future will -// only apply in the main module, like exclude and replace, -// and so we get better gradual deployments if old go commands -// simply ignore those statements when found in go.mod files -// in dependencies. 
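ModulePath above is a fast, error-tolerant scan that extracts only the module path. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/rogpeppe/go-internal/modfile"
)

func main() {
	gomod := []byte("// Example module.\nmodule example.com/hello\n\ngo 1.12\n")
	fmt.Println(modfile.ModulePath(gomod)) // example.com/hello
}
```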
-func ParseLax(file string, data []byte, fix VersionFixer) (*File, error) { - return parseToFile(file, data, fix, false) -} - -func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File, error) { - fs, err := parse(file, data) - if err != nil { - return nil, err - } - f := &File{ - Syntax: fs, - } - - var errs bytes.Buffer - for _, x := range fs.Stmt { - switch x := x.(type) { - case *Line: - f.add(&errs, x, x.Token[0], x.Token[1:], fix, strict) - - case *LineBlock: - if len(x.Token) > 1 { - if strict { - fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) - } - continue - } - switch x.Token[0] { - default: - if strict { - fmt.Fprintf(&errs, "%s:%d: unknown block type: %s\n", file, x.Start.Line, strings.Join(x.Token, " ")) - } - continue - case "module", "require", "exclude", "replace": - for _, l := range x.Line { - f.add(&errs, l, x.Token[0], l.Token, fix, strict) - } - } - } - } - - if errs.Len() > 0 { - return nil, errors.New(strings.TrimRight(errs.String(), "\n")) - } - return f, nil -} - -var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) - -func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) { - // If strict is false, this module is a dependency. - // We ignore all unknown directives as well as main-module-only - // directives like replace and exclude. It will work better for - // forward compatibility if we can depend on modules that have unknown - // statements (presumed relevant only when acting as the main module) - // and simply ignore those statements. - if !strict { - switch verb { - case "module", "require", "go": - // want these even for dependency go.mods - default: - return - } - } - - switch verb { - default: - fmt.Fprintf(errs, "%s:%d: unknown directive: %s\n", f.Syntax.Name, line.Start.Line, verb) - - case "go": - if f.Go != nil { - fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line) - return - } - if len(args) != 1 || !goVersionRE.MatchString(args[0]) { - fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line) - return - } - f.Go = &Go{Syntax: line} - f.Go.Version = args[0] - case "module": - if f.Module != nil { - fmt.Fprintf(errs, "%s:%d: repeated module statement\n", f.Syntax.Name, line.Start.Line) - return - } - f.Module = &Module{Syntax: line} - if len(args) != 1 { - - fmt.Fprintf(errs, "%s:%d: usage: module module/path [version]\n", f.Syntax.Name, line.Start.Line) - return - } - s, err := parseString(&args[0]) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) - return - } - f.Module.Mod = module.Version{Path: s} - case "require", "exclude": - if len(args) != 2 { - fmt.Fprintf(errs, "%s:%d: usage: %s module/path v1.2.3\n", f.Syntax.Name, line.Start.Line, verb) - return - } - s, err := parseString(&args[0]) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) - return - } - old := args[1] - v, err := parseVersion(s, &args[1], fix) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid module version %q: %v\n", f.Syntax.Name, line.Start.Line, old, err) - return - } - pathMajor, err := modulePathMajor(s) - if err != nil { - fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) - return - } - if !module.MatchPathMajor(v, pathMajor) { - if pathMajor == "" { - pathMajor = "v0 or v1" - } - fmt.Fprintf(errs, "%s:%d: invalid module: %s 
should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) - return - } - if verb == "require" { - f.Require = append(f.Require, &Require{ - Mod: module.Version{Path: s, Version: v}, - Syntax: line, - Indirect: isIndirect(line), - }) - } else { - f.Exclude = append(f.Exclude, &Exclude{ - Mod: module.Version{Path: s, Version: v}, - Syntax: line, - }) - } - case "replace": - arrow := 2 - if len(args) >= 2 && args[1] == "=>" { - arrow = 1 - } - if len(args) < arrow+2 || len(args) > arrow+3 || args[arrow] != "=>" { - fmt.Fprintf(errs, "%s:%d: usage: %s module/path [v1.2.3] => other/module v1.4\n\t or %s module/path [v1.2.3] => ../local/directory\n", f.Syntax.Name, line.Start.Line, verb, verb) - return - } - s, err := parseString(&args[0]) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) - return - } - pathMajor, err := modulePathMajor(s) - if err != nil { - fmt.Fprintf(errs, "%s:%d: %v\n", f.Syntax.Name, line.Start.Line, err) - return - } - var v string - if arrow == 2 { - old := args[1] - v, err = parseVersion(s, &args[1], fix) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) - return - } - if !module.MatchPathMajor(v, pathMajor) { - if pathMajor == "" { - pathMajor = "v0 or v1" - } - fmt.Fprintf(errs, "%s:%d: invalid module: %s should be %s, not %s (%s)\n", f.Syntax.Name, line.Start.Line, s, pathMajor, semver.Major(v), v) - return - } - } - ns, err := parseString(&args[arrow+1]) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid quoted string: %v\n", f.Syntax.Name, line.Start.Line, err) - return - } - nv := "" - if len(args) == arrow+2 { - if !IsDirectoryPath(ns) { - fmt.Fprintf(errs, "%s:%d: replacement module without version must be directory path (rooted or starting with ./ or ../)\n", f.Syntax.Name, line.Start.Line) - return - } - if filepath.Separator == '/' && strings.Contains(ns, `\`) { - fmt.Fprintf(errs, "%s:%d: replacement directory appears to be Windows path (on a non-windows system)\n", f.Syntax.Name, line.Start.Line) - return - } - } - if len(args) == arrow+3 { - old := args[arrow+1] - nv, err = parseVersion(ns, &args[arrow+2], fix) - if err != nil { - fmt.Fprintf(errs, "%s:%d: invalid module version %v: %v\n", f.Syntax.Name, line.Start.Line, old, err) - return - } - if IsDirectoryPath(ns) { - fmt.Fprintf(errs, "%s:%d: replacement module directory path %q cannot have version\n", f.Syntax.Name, line.Start.Line, ns) - return - } - } - f.Replace = append(f.Replace, &Replace{ - Old: module.Version{Path: s, Version: v}, - New: module.Version{Path: ns, Version: nv}, - Syntax: line, - }) - } -} - -// isIndirect reports whether line has a "// indirect" comment, -// meaning it is in go.mod only for its effect on indirect dependencies, -// so that it can be dropped entirely once the effective version of the -// indirect dependency reaches the given minimum version. -func isIndirect(line *Line) bool { - if len(line.Suffix) == 0 { - return false - } - f := strings.Fields(line.Suffix[0].Token) - return (len(f) == 2 && f[1] == "indirect" || len(f) > 2 && f[1] == "indirect;") && f[0] == "//" -} - -// setIndirect sets line to have (or not have) a "// indirect" comment. -func setIndirect(line *Line, indirect bool) { - if isIndirect(line) == indirect { - return - } - if indirect { - // Adding comment. - if len(line.Suffix) == 0 { - // New comment. 
- line.Suffix = []Comment{{Token: "// indirect", Suffix: true}} - return - } - // Insert at beginning of existing comment. - com := &line.Suffix[0] - space := " " - if len(com.Token) > 2 && com.Token[2] == ' ' || com.Token[2] == '\t' { - space = "" - } - com.Token = "// indirect;" + space + com.Token[2:] - return - } - - // Removing comment. - f := strings.Fields(line.Suffix[0].Token) - if len(f) == 2 { - // Remove whole comment. - line.Suffix = nil - return - } - - // Remove comment prefix. - com := &line.Suffix[0] - i := strings.Index(com.Token, "indirect;") - com.Token = "//" + com.Token[i+len("indirect;"):] -} - -// IsDirectoryPath reports whether the given path should be interpreted -// as a directory path. Just like on the go command line, relative paths -// and rooted paths are directory paths; the rest are module paths. -func IsDirectoryPath(ns string) bool { - // Because go.mod files can move from one system to another, - // we check all known path syntaxes, both Unix and Windows. - return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || - strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || - len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' -} - -// MustQuote reports whether s must be quoted in order to appear as -// a single token in a go.mod line. -func MustQuote(s string) bool { - for _, r := range s { - if !unicode.IsPrint(r) || r == ' ' || r == '"' || r == '\'' || r == '`' { - return true - } - } - return s == "" || strings.Contains(s, "//") || strings.Contains(s, "/*") -} - -// AutoQuote returns s or, if quoting is required for s to appear in a go.mod, -// the quotation of s. -func AutoQuote(s string) string { - if MustQuote(s) { - return strconv.Quote(s) - } - return s -} - -func parseString(s *string) (string, error) { - t := *s - if strings.HasPrefix(t, `"`) { - var err error - if t, err = strconv.Unquote(t); err != nil { - return "", err - } - } else if strings.ContainsAny(t, "\"'`") { - // Other quotes are reserved both for possible future expansion - // and to avoid confusion. For example if someone types 'x' - // we want that to be a syntax error and not a literal x in literal quotation marks. - return "", fmt.Errorf("unquoted string cannot contain quote") - } - *s = AutoQuote(t) - return t, nil -} - -func parseVersion(path string, s *string, fix VersionFixer) (string, error) { - t, err := parseString(s) - if err != nil { - return "", err - } - if fix != nil { - var err error - t, err = fix(path, t) - if err != nil { - return "", err - } - } - if v := module.CanonicalVersion(t); v != "" { - *s = v - return *s, nil - } - return "", fmt.Errorf("version must be of the form v1.2.3") -} - -func modulePathMajor(path string) (string, error) { - _, major, ok := module.SplitPathVersion(path) - if !ok { - return "", fmt.Errorf("invalid module path") - } - return major, nil -} - -func (f *File) Format() ([]byte, error) { - return Format(f.Syntax), nil -} - -// Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire -// clear the entry but do not remove it from the slice. -// Cleanup cleans out all the cleared entries. 
-func (f *File) Cleanup() { - w := 0 - for _, r := range f.Require { - if r.Mod.Path != "" { - f.Require[w] = r - w++ - } - } - f.Require = f.Require[:w] - - w = 0 - for _, x := range f.Exclude { - if x.Mod.Path != "" { - f.Exclude[w] = x - w++ - } - } - f.Exclude = f.Exclude[:w] - - w = 0 - for _, r := range f.Replace { - if r.Old.Path != "" { - f.Replace[w] = r - w++ - } - } - f.Replace = f.Replace[:w] - - f.Syntax.Cleanup() -} - -func (f *File) AddRequire(path, vers string) error { - need := true - for _, r := range f.Require { - if r.Mod.Path == path { - if need { - r.Mod.Version = vers - f.Syntax.updateLine(r.Syntax, "require", AutoQuote(path), vers) - need = false - } else { - f.Syntax.removeLine(r.Syntax) - *r = Require{} - } - } - } - - if need { - f.AddNewRequire(path, vers, false) - } - return nil -} - -func (f *File) AddNewRequire(path, vers string, indirect bool) { - line := f.Syntax.addLine(nil, "require", AutoQuote(path), vers) - setIndirect(line, indirect) - f.Require = append(f.Require, &Require{module.Version{Path: path, Version: vers}, indirect, line}) -} - -func (f *File) SetRequire(req []*Require) { - need := make(map[string]string) - indirect := make(map[string]bool) - for _, r := range req { - need[r.Mod.Path] = r.Mod.Version - indirect[r.Mod.Path] = r.Indirect - } - - for _, r := range f.Require { - if v, ok := need[r.Mod.Path]; ok { - r.Mod.Version = v - r.Indirect = indirect[r.Mod.Path] - } - } - - var newStmts []Expr - for _, stmt := range f.Syntax.Stmt { - switch stmt := stmt.(type) { - case *LineBlock: - if len(stmt.Token) > 0 && stmt.Token[0] == "require" { - var newLines []*Line - for _, line := range stmt.Line { - if p, err := parseString(&line.Token[0]); err == nil && need[p] != "" { - line.Token[1] = need[p] - delete(need, p) - setIndirect(line, indirect[p]) - newLines = append(newLines, line) - } - } - if len(newLines) == 0 { - continue // drop stmt - } - stmt.Line = newLines - } - - case *Line: - if len(stmt.Token) > 0 && stmt.Token[0] == "require" { - if p, err := parseString(&stmt.Token[1]); err == nil && need[p] != "" { - stmt.Token[2] = need[p] - delete(need, p) - setIndirect(stmt, indirect[p]) - } else { - continue // drop stmt - } - } - } - newStmts = append(newStmts, stmt) - } - f.Syntax.Stmt = newStmts - - for path, vers := range need { - f.AddNewRequire(path, vers, indirect[path]) - } - f.SortBlocks() -} - -func (f *File) DropRequire(path string) error { - for _, r := range f.Require { - if r.Mod.Path == path { - f.Syntax.removeLine(r.Syntax) - *r = Require{} - } - } - return nil -} - -func (f *File) AddExclude(path, vers string) error { - var hint *Line - for _, x := range f.Exclude { - if x.Mod.Path == path && x.Mod.Version == vers { - return nil - } - if x.Mod.Path == path { - hint = x.Syntax - } - } - - f.Exclude = append(f.Exclude, &Exclude{Mod: module.Version{Path: path, Version: vers}, Syntax: f.Syntax.addLine(hint, "exclude", AutoQuote(path), vers)}) - return nil -} - -func (f *File) DropExclude(path, vers string) error { - for _, x := range f.Exclude { - if x.Mod.Path == path && x.Mod.Version == vers { - f.Syntax.removeLine(x.Syntax) - *x = Exclude{} - } - } - return nil -} - -func (f *File) AddReplace(oldPath, oldVers, newPath, newVers string) error { - need := true - old := module.Version{Path: oldPath, Version: oldVers} - new := module.Version{Path: newPath, Version: newVers} - tokens := []string{"replace", AutoQuote(oldPath)} - if oldVers != "" { - tokens = append(tokens, oldVers) - } - tokens = append(tokens, "=>", 
AutoQuote(newPath)) - if newVers != "" { - tokens = append(tokens, newVers) - } - - var hint *Line - for _, r := range f.Replace { - if r.Old.Path == oldPath && (oldVers == "" || r.Old.Version == oldVers) { - if need { - // Found replacement for old; update to use new. - r.New = new - f.Syntax.updateLine(r.Syntax, tokens...) - need = false - continue - } - // Already added; delete other replacements for same. - f.Syntax.removeLine(r.Syntax) - *r = Replace{} - } - if r.Old.Path == oldPath { - hint = r.Syntax - } - } - if need { - f.Replace = append(f.Replace, &Replace{Old: old, New: new, Syntax: f.Syntax.addLine(hint, tokens...)}) - } - return nil -} - -func (f *File) DropReplace(oldPath, oldVers string) error { - for _, r := range f.Replace { - if r.Old.Path == oldPath && r.Old.Version == oldVers { - f.Syntax.removeLine(r.Syntax) - *r = Replace{} - } - } - return nil -} - -func (f *File) SortBlocks() { - f.removeDups() // otherwise sorting is unsafe - - for _, stmt := range f.Syntax.Stmt { - block, ok := stmt.(*LineBlock) - if !ok { - continue - } - sort.Slice(block.Line, func(i, j int) bool { - li := block.Line[i] - lj := block.Line[j] - for k := 0; k < len(li.Token) && k < len(lj.Token); k++ { - if li.Token[k] != lj.Token[k] { - return li.Token[k] < lj.Token[k] - } - } - return len(li.Token) < len(lj.Token) - }) - } -} - -func (f *File) removeDups() { - have := make(map[module.Version]bool) - kill := make(map[*Line]bool) - for _, x := range f.Exclude { - if have[x.Mod] { - kill[x.Syntax] = true - continue - } - have[x.Mod] = true - } - var excl []*Exclude - for _, x := range f.Exclude { - if !kill[x.Syntax] { - excl = append(excl, x) - } - } - f.Exclude = excl - - have = make(map[module.Version]bool) - // Later replacements take priority over earlier ones. - for i := len(f.Replace) - 1; i >= 0; i-- { - x := f.Replace[i] - if have[x.Old] { - kill[x.Syntax] = true - continue - } - have[x.Old] = true - } - var repl []*Replace - for _, x := range f.Replace { - if !kill[x.Syntax] { - repl = append(repl, x) - } - } - f.Replace = repl - - var stmts []Expr - for _, stmt := range f.Syntax.Stmt { - switch stmt := stmt.(type) { - case *Line: - if kill[stmt] { - continue - } - case *LineBlock: - var lines []*Line - for _, line := range stmt.Line { - if !kill[line] { - lines = append(lines, line) - } - } - stmt.Line = lines - if len(lines) == 0 { - continue - } - } - stmts = append(stmts, stmt) - } - f.Syntax.Stmt = stmts -} diff --git a/vendor/github.com/rogpeppe/go-internal/module/module.go b/vendor/github.com/rogpeppe/go-internal/module/module.go deleted file mode 100644 index 3ff6d9bf53..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/module/module.go +++ /dev/null @@ -1,540 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package module defines the module.Version type -// along with support code. -package module - -// IMPORTANT NOTE -// -// This file essentially defines the set of valid import paths for the go command. -// There are many subtle considerations, including Unicode ambiguity, -// security, network, and file system representations. -// -// This file also defines the set of valid module path and version combinations, -// another topic with many subtle considerations. -// -// Changes to the semantics in this file require approval from rsc. 
- -import ( - "fmt" - "sort" - "strings" - "unicode" - "unicode/utf8" - - "github.com/rogpeppe/go-internal/semver" -) - -// A Version is defined by a module path and version pair. -type Version struct { - Path string - - // Version is usually a semantic version in canonical form. - // There are two exceptions to this general rule. - // First, the top-level target of a build has no specific version - // and uses Version = "". - // Second, during MVS calculations the version "none" is used - // to represent the decision to take no version of a given module. - Version string `json:",omitempty"` -} - -// Check checks that a given module path, version pair is valid. -// In addition to the path being a valid module path -// and the version being a valid semantic version, -// the two must correspond. -// For example, the path "yaml/v2" only corresponds to -// semantic versions beginning with "v2.". -func Check(path, version string) error { - if err := CheckPath(path); err != nil { - return err - } - if !semver.IsValid(version) { - return fmt.Errorf("malformed semantic version %v", version) - } - _, pathMajor, _ := SplitPathVersion(path) - if !MatchPathMajor(version, pathMajor) { - if pathMajor == "" { - pathMajor = "v0 or v1" - } - if pathMajor[0] == '.' { // .v1 - pathMajor = pathMajor[1:] - } - return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) - } - return nil -} - -// firstPathOK reports whether r can appear in the first element of a module path. -// The first element of the path must be an LDH domain name, at least for now. -// To avoid case ambiguity, the domain name must be entirely lower case. -func firstPathOK(r rune) bool { - return r == '-' || r == '.' || - '0' <= r && r <= '9' || - 'a' <= r && r <= 'z' -} - -// pathOK reports whether r can appear in an import path element. -// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. -// This matches what "go get" has historically recognized in import paths. -// TODO(rsc): We would like to allow Unicode letters, but that requires additional -// care in the safe encoding (see note below). -func pathOK(r rune) bool { - if r < utf8.RuneSelf { - return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || - '0' <= r && r <= '9' || - 'A' <= r && r <= 'Z' || - 'a' <= r && r <= 'z' - } - return false -} - -// fileNameOK reports whether r can appear in a file name. -// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. -// If we expand the set of allowed characters here, we have to -// work harder at detecting potential case-folding and normalization collisions. -// See note about "safe encoding" below. -func fileNameOK(r rune) bool { - if r < utf8.RuneSelf { - // Entire set of ASCII punctuation, from which we remove characters: - // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~ - // We disallow some shell special characters: " ' * < > ? ` | - // (Note that some of those are disallowed by the Windows file system as well.) - // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). - // We allow spaces (U+0020) in file names. - const allowed = "!#$%&()+,-.=@[]^_{}~ " - if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { - return true - } - for i := 0; i < len(allowed); i++ { - if rune(allowed[i]) == r { - return true - } - } - return false - } - // It may be OK to add more ASCII punctuation here, but only carefully. 
- // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. - return unicode.IsLetter(r) -} - -// CheckPath checks that a module path is valid. -func CheckPath(path string) error { - if err := checkPath(path, false); err != nil { - return fmt.Errorf("malformed module path %q: %v", path, err) - } - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - if i == 0 { - return fmt.Errorf("malformed module path %q: leading slash", path) - } - if !strings.Contains(path[:i], ".") { - return fmt.Errorf("malformed module path %q: missing dot in first path element", path) - } - if path[0] == '-' { - return fmt.Errorf("malformed module path %q: leading dash in first path element", path) - } - for _, r := range path[:i] { - if !firstPathOK(r) { - return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r) - } - } - if _, _, ok := SplitPathVersion(path); !ok { - return fmt.Errorf("malformed module path %q: invalid version", path) - } - return nil -} - -// CheckImportPath checks that an import path is valid. -func CheckImportPath(path string) error { - if err := checkPath(path, false); err != nil { - return fmt.Errorf("malformed import path %q: %v", path, err) - } - return nil -} - -// checkPath checks that a general path is valid. -// It returns an error describing why but not mentioning path. -// Because these checks apply to both module paths and import paths, -// the caller is expected to add the "malformed ___ path %q: " prefix. -// fileName indicates whether the final element of the path is a file name -// (as opposed to a directory name). -func checkPath(path string, fileName bool) error { - if !utf8.ValidString(path) { - return fmt.Errorf("invalid UTF-8") - } - if path == "" { - return fmt.Errorf("empty string") - } - if strings.Contains(path, "..") { - return fmt.Errorf("double dot") - } - if strings.Contains(path, "//") { - return fmt.Errorf("double slash") - } - if path[len(path)-1] == '/' { - return fmt.Errorf("trailing slash") - } - elemStart := 0 - for i, r := range path { - if r == '/' { - if err := checkElem(path[elemStart:i], fileName); err != nil { - return err - } - elemStart = i + 1 - } - } - if err := checkElem(path[elemStart:], fileName); err != nil { - return err - } - return nil -} - -// checkElem checks whether an individual path element is valid. -// fileName indicates whether the element is a file name (not a directory name). -func checkElem(elem string, fileName bool) error { - if elem == "" { - return fmt.Errorf("empty path element") - } - if strings.Count(elem, ".") == len(elem) { - return fmt.Errorf("invalid path element %q", elem) - } - if elem[0] == '.' && !fileName { - return fmt.Errorf("leading dot in path element") - } - if elem[len(elem)-1] == '.' { - return fmt.Errorf("trailing dot in path element") - } - charOK := pathOK - if fileName { - charOK = fileNameOK - } - for _, r := range elem { - if !charOK(r) { - return fmt.Errorf("invalid char %q", r) - } - } - - // Windows disallows a bunch of path elements, sadly. - // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file - short := elem - if i := strings.Index(short, "."); i >= 0 { - short = short[:i] - } - for _, bad := range badWindowsNames { - if strings.EqualFold(bad, short) { - return fmt.Errorf("disallowed path element %q", elem) - } - } - return nil -} - -// CheckFilePath checks whether a slash-separated file path is valid. 
-func CheckFilePath(path string) error { - if err := checkPath(path, true); err != nil { - return fmt.Errorf("malformed file path %q: %v", path, err) - } - return nil -} - -// badWindowsNames are the reserved file path elements on Windows. -// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file -var badWindowsNames = []string{ - "CON", - "PRN", - "AUX", - "NUL", - "COM1", - "COM2", - "COM3", - "COM4", - "COM5", - "COM6", - "COM7", - "COM8", - "COM9", - "LPT1", - "LPT2", - "LPT3", - "LPT4", - "LPT5", - "LPT6", - "LPT7", - "LPT8", - "LPT9", -} - -// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path -// and version is either empty or "/vN" for N >= 2. -// As a special case, gopkg.in paths are recognized directly; -// they require ".vN" instead of "/vN", and for all N, not just N >= 2. -func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { - if strings.HasPrefix(path, "gopkg.in/") { - return splitGopkgIn(path) - } - - i := len(path) - dot := false - for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { - if path[i-1] == '.' { - dot = true - } - i-- - } - if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' { - return path, "", true - } - prefix, pathMajor = path[:i-2], path[i-2:] - if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { - return path, "", false - } - return prefix, pathMajor, true -} - -// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. -func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { - if !strings.HasPrefix(path, "gopkg.in/") { - return path, "", false - } - i := len(path) - if strings.HasSuffix(path, "-unstable") { - i -= len("-unstable") - } - for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { - i-- - } - if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { - // All gopkg.in paths must end in vN for some N. - return path, "", false - } - prefix, pathMajor = path[:i-2], path[i-2:] - if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { - return path, "", false - } - return prefix, pathMajor, true -} - -// MatchPathMajor reports whether the semantic version v -// matches the path major version pathMajor. -func MatchPathMajor(v, pathMajor string) bool { - if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { - pathMajor = strings.TrimSuffix(pathMajor, "-unstable") - } - if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { - // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. - // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. - return true - } - m := semver.Major(v) - if pathMajor == "" { - return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" - } - return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] -} - -// CanonicalVersion returns the canonical form of the version string v. -// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". -func CanonicalVersion(v string) string { - cv := semver.Canonical(v) - if semver.Build(v) == "+incompatible" { - cv += "+incompatible" - } - return cv -} - -// Sort sorts the list by Path, breaking ties by comparing Versions. -func Sort(list []Version) { - sort.Slice(list, func(i, j int) bool { - mi := list[i] - mj := list[j] - if mi.Path != mj.Path { - return mi.Path < mj.Path - } - // To help go.sum formatting, allow version/file. 
- // Compare semver prefix by semver rules, - // file by string order. - vi := mi.Version - vj := mj.Version - var fi, fj string - if k := strings.Index(vi, "/"); k >= 0 { - vi, fi = vi[:k], vi[k:] - } - if k := strings.Index(vj, "/"); k >= 0 { - vj, fj = vj[:k], vj[k:] - } - if vi != vj { - return semver.Compare(vi, vj) < 0 - } - return fi < fj - }) -} - -// Safe encodings -// -// Module paths appear as substrings of file system paths -// (in the download cache) and of web server URLs in the proxy protocol. -// In general we cannot rely on file systems to be case-sensitive, -// nor can we rely on web servers, since they read from file systems. -// That is, we cannot rely on the file system to keep rsc.io/QUOTE -// and rsc.io/quote separate. Windows and macOS don't. -// Instead, we must never require two different casings of a file path. -// Because we want the download cache to match the proxy protocol, -// and because we want the proxy protocol to be possible to serve -// from a tree of static files (which might be stored on a case-insensitive -// file system), the proxy protocol must never require two different casings -// of a URL path either. -// -// One possibility would be to make the safe encoding be the lowercase -// hexadecimal encoding of the actual path bytes. This would avoid ever -// needing different casings of a file path, but it would be fairly illegible -// to most programmers when those paths appeared in the file system -// (including in file paths in compiler errors and stack traces) -// in web server logs, and so on. Instead, we want a safe encoding that -// leaves most paths unaltered. -// -// The safe encoding is this: -// replace every uppercase letter with an exclamation mark -// followed by the letter's lowercase equivalent. -// -// For example, -// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. -// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy -// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. -// -// Import paths that avoid upper-case letters are left unchanged. -// Note that because import paths are ASCII-only and avoid various -// problematic punctuation (like : < and >), the safe encoding is also ASCII-only -// and avoids the same problematic punctuation. -// -// Import paths have never allowed exclamation marks, so there is no -// need to define how to encode a literal !. -// -// Although paths are disallowed from using Unicode (see pathOK above), -// the eventual plan is to allow Unicode letters as well, to assume that -// file systems and URLs are Unicode-safe (storing UTF-8), and apply -// the !-for-uppercase convention. Note however that not all runes that -// are different but case-fold equivalent are an upper/lower pair. -// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) -// are considered to case-fold to each other. When we do add Unicode -// letters, we must not assume that upper/lower are the only case-equivalent pairs. -// Perhaps the Kelvin symbol would be disallowed entirely, for example. -// Or perhaps it would encode as "!!k", or perhaps as "(212A)". -// -// Also, it would be nice to allow Unicode marks as well as letters, -// but marks include combining marks, and then we must deal not -// only with case folding but also normalization: both U+00E9 ('é') -// and U+0065 U+0301 ('e' followed by combining acute accent) -// look the same on the page and are treated by some file systems -// as the same path. 
If we do allow Unicode marks in paths, there -// must be some kind of normalization to allow only one canonical -// encoding of any character used in an import path. - -// EncodePath returns the safe encoding of the given module path. -// It fails if the module path is invalid. -func EncodePath(path string) (encoding string, err error) { - if err := CheckPath(path); err != nil { - return "", err - } - - return encodeString(path) -} - -// EncodeVersion returns the safe encoding of the given module version. -// Versions are allowed to be in non-semver form but must be valid file names -// and not contain exclamation marks. -func EncodeVersion(v string) (encoding string, err error) { - if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { - return "", fmt.Errorf("disallowed version string %q", v) - } - return encodeString(v) -} - -func encodeString(s string) (encoding string, err error) { - haveUpper := false - for _, r := range s { - if r == '!' || r >= utf8.RuneSelf { - // This should be disallowed by CheckPath, but diagnose anyway. - // The correctness of the encoding loop below depends on it. - return "", fmt.Errorf("internal error: inconsistency in EncodePath") - } - if 'A' <= r && r <= 'Z' { - haveUpper = true - } - } - - if !haveUpper { - return s, nil - } - - var buf []byte - for _, r := range s { - if 'A' <= r && r <= 'Z' { - buf = append(buf, '!', byte(r+'a'-'A')) - } else { - buf = append(buf, byte(r)) - } - } - return string(buf), nil -} - -// DecodePath returns the module path of the given safe encoding. -// It fails if the encoding is invalid or encodes an invalid path. -func DecodePath(encoding string) (path string, err error) { - path, ok := decodeString(encoding) - if !ok { - return "", fmt.Errorf("invalid module path encoding %q", encoding) - } - if err := CheckPath(path); err != nil { - return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) - } - return path, nil -} - -// DecodeVersion returns the version string for the given safe encoding. -// It fails if the encoding is invalid or encodes an invalid version. -// Versions are allowed to be in non-semver form but must be valid file names -// and not contain exclamation marks. -func DecodeVersion(encoding string) (v string, err error) { - v, ok := decodeString(encoding) - if !ok { - return "", fmt.Errorf("invalid version encoding %q", encoding) - } - if err := checkElem(v, true); err != nil { - return "", fmt.Errorf("disallowed version string %q", v) - } - return v, nil -} - -func decodeString(encoding string) (string, bool) { - var buf []byte - - bang := false - for _, r := range encoding { - if r >= utf8.RuneSelf { - return "", false - } - if bang { - bang = false - if r < 'a' || 'z' < r { - return "", false - } - buf = append(buf, byte(r+'A'-'a')) - continue - } - if r == '!' { - bang = true - continue - } - if 'A' <= r && r <= 'Z' { - return "", false - } - buf = append(buf, byte(r)) - } - if bang { - return "", false - } - return string(buf), true -} diff --git a/vendor/github.com/rogpeppe/go-internal/semver/semver.go b/vendor/github.com/rogpeppe/go-internal/semver/semver.go deleted file mode 100644 index 4af7118e55..0000000000 --- a/vendor/github.com/rogpeppe/go-internal/semver/semver.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semver implements comparison of semantic version strings. 
-// In this package, semantic version strings must begin with a leading "v", -// as in "v1.0.0". -// -// The general form of a semantic version string accepted by this package is -// -// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] -// -// where square brackets indicate optional parts of the syntax; -// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; -// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers -// using only alphanumeric characters and hyphens; and -// all-numeric PRERELEASE identifiers must not have leading zeros. -// -// This package follows Semantic Versioning 2.0.0 (see semver.org) -// with two exceptions. First, it requires the "v" prefix. Second, it recognizes -// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) -// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. -package semver - -// parsed returns the parsed form of a semantic version string. -type parsed struct { - major string - minor string - patch string - short string - prerelease string - build string - err string -} - -// IsValid reports whether v is a valid semantic version string. -func IsValid(v string) bool { - _, ok := parse(v) - return ok -} - -// Canonical returns the canonical formatting of the semantic version v. -// It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. -// The canonical invalid semantic version is the empty string. -func Canonical(v string) string { - p, ok := parse(v) - if !ok { - return "" - } - if p.build != "" { - return v[:len(v)-len(p.build)] - } - if p.short != "" { - return v + p.short - } - return v -} - -// Major returns the major version prefix of the semantic version v. -// For example, Major("v2.1.0") == "v2". -// If v is an invalid semantic version string, Major returns the empty string. -func Major(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return v[:1+len(pv.major)] -} - -// MajorMinor returns the major.minor version prefix of the semantic version v. -// For example, MajorMinor("v2.1.0") == "v2.1". -// If v is an invalid semantic version string, MajorMinor returns the empty string. -func MajorMinor(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - i := 1 + len(pv.major) - if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { - return v[:j] - } - return v[:i] + "." + pv.minor -} - -// Prerelease returns the prerelease suffix of the semantic version v. -// For example, Prerelease("v2.1.0-pre+meta") == "-pre". -// If v is an invalid semantic version string, Prerelease returns the empty string. -func Prerelease(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return pv.prerelease -} - -// Build returns the build suffix of the semantic version v. -// For example, Build("v2.1.0+meta") == "+meta". -// If v is an invalid semantic version string, Build returns the empty string. -func Build(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return pv.build -} - -// Compare returns an integer comparing two versions according to -// according to semantic version precedence. -// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. -// -// An invalid semantic version string is considered less than a valid one. -// All invalid semantic version strings compare equal to each other. 
-func Compare(v, w string) int { - pv, ok1 := parse(v) - pw, ok2 := parse(w) - if !ok1 && !ok2 { - return 0 - } - if !ok1 { - return -1 - } - if !ok2 { - return +1 - } - if c := compareInt(pv.major, pw.major); c != 0 { - return c - } - if c := compareInt(pv.minor, pw.minor); c != 0 { - return c - } - if c := compareInt(pv.patch, pw.patch); c != 0 { - return c - } - return comparePrerelease(pv.prerelease, pw.prerelease) -} - -// Max canonicalizes its arguments and then returns the version string -// that compares greater. -func Max(v, w string) string { - v = Canonical(v) - w = Canonical(w) - if Compare(v, w) > 0 { - return v - } - return w -} - -func parse(v string) (p parsed, ok bool) { - if v == "" || v[0] != 'v' { - p.err = "missing v prefix" - return - } - p.major, v, ok = parseInt(v[1:]) - if !ok { - p.err = "bad major version" - return - } - if v == "" { - p.minor = "0" - p.patch = "0" - p.short = ".0.0" - return - } - if v[0] != '.' { - p.err = "bad minor prefix" - ok = false - return - } - p.minor, v, ok = parseInt(v[1:]) - if !ok { - p.err = "bad minor version" - return - } - if v == "" { - p.patch = "0" - p.short = ".0" - return - } - if v[0] != '.' { - p.err = "bad patch prefix" - ok = false - return - } - p.patch, v, ok = parseInt(v[1:]) - if !ok { - p.err = "bad patch version" - return - } - if len(v) > 0 && v[0] == '-' { - p.prerelease, v, ok = parsePrerelease(v) - if !ok { - p.err = "bad prerelease" - return - } - } - if len(v) > 0 && v[0] == '+' { - p.build, v, ok = parseBuild(v) - if !ok { - p.err = "bad build" - return - } - } - if v != "" { - p.err = "junk on end" - ok = false - return - } - ok = true - return -} - -func parseInt(v string) (t, rest string, ok bool) { - if v == "" { - return - } - if v[0] < '0' || '9' < v[0] { - return - } - i := 1 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - if v[0] == '0' && i != 1 { - return - } - return v[:i], v[i:], true -} - -func parsePrerelease(v string) (t, rest string, ok bool) { - // "A pre-release version MAY be denoted by appending a hyphen and - // a series of dot separated identifiers immediately following the patch version. - // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. - // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." - if v == "" || v[0] != '-' { - return - } - i := 1 - start := 1 - for i < len(v) && v[i] != '+' { - if !isIdentChar(v[i]) && v[i] != '.' { - return - } - if v[i] == '.' { - if start == i || isBadNum(v[start:i]) { - return - } - start = i + 1 - } - i++ - } - if start == i || isBadNum(v[start:i]) { - return - } - return v[:i], v[i:], true -} - -func parseBuild(v string) (t, rest string, ok bool) { - if v == "" || v[0] != '+' { - return - } - i := 1 - start := 1 - for i < len(v) { - if !isIdentChar(v[i]) { - return - } - if v[i] == '.' 
{ - if start == i { - return - } - start = i + 1 - } - i++ - } - if start == i { - return - } - return v[:i], v[i:], true -} - -func isIdentChar(c byte) bool { - return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' -} - -func isBadNum(v string) bool { - i := 0 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - return i == len(v) && i > 1 && v[0] == '0' -} - -func isNum(v string) bool { - i := 0 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - return i == len(v) -} - -func compareInt(x, y string) int { - if x == y { - return 0 - } - if len(x) < len(y) { - return -1 - } - if len(x) > len(y) { - return +1 - } - if x < y { - return -1 - } else { - return +1 - } -} - -func comparePrerelease(x, y string) int { - // "When major, minor, and patch are equal, a pre-release version has - // lower precedence than a normal version. - // Example: 1.0.0-alpha < 1.0.0. - // Precedence for two pre-release versions with the same major, minor, - // and patch version MUST be determined by comparing each dot separated - // identifier from left to right until a difference is found as follows: - // identifiers consisting of only digits are compared numerically and - // identifiers with letters or hyphens are compared lexically in ASCII - // sort order. Numeric identifiers always have lower precedence than - // non-numeric identifiers. A larger set of pre-release fields has a - // higher precedence than a smaller set, if all of the preceding - // identifiers are equal. - // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < - // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." - if x == y { - return 0 - } - if x == "" { - return +1 - } - if y == "" { - return -1 - } - for x != "" && y != "" { - x = x[1:] // skip - or . - y = y[1:] // skip - or . - var dx, dy string - dx, x = nextIdent(x) - dy, y = nextIdent(y) - if dx != dy { - ix := isNum(dx) - iy := isNum(dy) - if ix != iy { - if ix { - return -1 - } else { - return +1 - } - } - if ix { - if len(dx) < len(dy) { - return -1 - } - if len(dx) > len(dy) { - return +1 - } - } - if dx < dy { - return -1 - } else { - return +1 - } - } - } - if x == "" { - return -1 - } else { - return +1 - } -} - -func nextIdent(x string) (dx, rest string) { - i := 0 - for i < len(x) && x[i] != '.' { - i++ - } - return x[:i], x[i:] -} diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index 0637db726d..0000000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e2665..0000000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index 0c9b04b53f..0000000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,452 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is an filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. 
- -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. - - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware - - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrap for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a pointer to a filesystem. -```go -var AppFs = afero.NewMemMapFs() - -or - -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if my application before had: -```go -os.Open("/tmp/foo") -``` -We would replace it with: -```go -AppFs.Open("/tmp/foo") -``` - -`AppFs` being the variable we defined above.
- - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil with some developed for Hugo. - -The afero utilities support all afero compatible backends. - -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) - -They are available under two different approaches to use. You can either call -them directly where the first parameter of each function will be the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs,"", "ioutil-test") - -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and can be easily -reproducible regardless of OS. You could create files to your heart’s content -and the file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoid security issues and permissions -* Far more control. 'rm -rf /' with confidence -* Test setup is far more easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above. -In your application this will be set to afero.NewOsFs() during testing you -can set it to afero.NewMemMapFs(). 
-
-It wouldn't be uncommon to have each test initialize a blank slate memory
-backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
-appropriate in my application code. This approach ensures that tests are order
-independent, with no test relying on the state left by an earlier test.
-
-Then in my tests I would initialize a new MemMapFs for each test:
-```go
-func TestExist(t *testing.T) {
-	appFS := afero.NewMemMapFs()
-	// create test files and directories
-	appFS.MkdirAll("src/a", 0755)
-	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
-	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
-	name := "src/c"
-	_, err := appFS.Stat(name)
-	if os.IsNotExist(err) {
-		t.Errorf("file \"%s\" does not exist.\n", name)
-	}
-}
-```
-
-# Available Backends
-
-## Operating System Native
-
-### OsFs
-
-The first backend is simply a wrapper around the native OS calls. This makes it
-very easy to use, as all of the calls are the same as the existing OS
-calls. It also makes it trivial to have your code use the OS during
-operation and a mock filesystem during testing or as needed.
-
-```go
-appfs := afero.NewOsFs()
-appfs.MkdirAll("src/a", 0755)
-```
-
-## Memory Backed Storage
-
-### MemMapFs
-
-Afero also provides a fully atomic memory-backed filesystem, perfect for use in
-mocking and for avoiding unnecessary disk I/O when persistence isn't
-required. It is fully concurrent and safe to use from multiple goroutines.
-
-```go
-mm := afero.NewMemMapFs()
-mm.MkdirAll("src/a", 0755)
-```
-
-#### InMemoryFile
-
-As part of MemMapFs, Afero also provides an atomic, fully concurrent,
-memory-backed file implementation. This can be used in other memory-backed
-file systems with ease. There are plans to add a radix-tree-based,
-memory-stored file system using InMemoryFile.
-
-## Network Interfaces
-
-### SftpFs
-
-Afero has experimental support for the secure file transfer protocol (SFTP),
-which can be used to perform file operations over an encrypted channel.
-
-## Filtering Backends
-
-### BasePathFs
-
-The BasePathFs restricts all operations to a given path within an Fs.
-File names passed to operations on this Fs will have the base path
-prepended before the call is forwarded to the source Fs.
-
-```go
-bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
-```
-
-### ReadOnlyFs
-
-A thin wrapper around the source Fs providing a read-only view.
-
-```go
-fs := afero.NewReadOnlyFs(afero.NewOsFs())
-_, err := fs.Create("/file.txt")
-// err = syscall.EPERM
-```
-
-### RegexpFs
-
-A filtered view on file names: any file NOT matching
-the passed regexp will be treated as non-existing.
-Files not matching the regexp provided will not be created.
-Directories are not filtered.
-
-```go
-fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
-_, err := fs.Create("/file.html")
-// err = syscall.ENOENT
-```
-
-### HttpFs
-
-Afero provides an http-compatible backend which can wrap any of the existing
-backends.
-
-The net/http package requires a slightly different version of Open, one which
-returns an http.File type.
-
-Afero provides an HttpFs file system which satisfies this requirement.
-Any Afero FileSystem can be used as an HttpFs.
-
-```go
-httpFs := afero.NewHttpFs(existingFs) // existingFs can be any afero.Fs
-fileserver := http.FileServer(httpFs.Dir(path))
-http.Handle("/", fileserver)
-```
-
-## Composite Backends
-
-Afero provides the ability to have two filesystems (or more) act as a single
-file system.
-
-### CacheOnReadFs
-
-The CacheOnReadFs will lazily make copies of any accessed files from the base
-layer into the overlay.
-Subsequent reads will be pulled from the overlay directly, provided the
-request falls within the cache duration of when the file was created in the
-overlay.
-
-If the base filesystem is writeable, any changes to files will be made
-first to the base, then to the overlay layer. Write calls on open file
-handles like `Write()` or `Truncate()` go to the overlay first.
-
-To write files to the overlay only, you can use the overlay Fs directly (not
-via the union Fs).
-
-Files are cached in the layer for the given time.Duration; a cache duration of 0
-means "forever", i.e. the file will never be re-requested from the base.
-
-A read-only base will make the overlay also read-only, but it will still copy
-files from the base to the overlay when they're not present (or outdated) in
-the caching layer.
-
-```go
-base := afero.NewOsFs()
-layer := afero.NewMemMapFs()
-ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
-```
-
-### CopyOnWriteFs()
-
-The CopyOnWriteFs is a read-only base file system with a potentially
-writeable layer on top.
-
-Read operations will first look in the overlay and, if the file is not found
-there, will serve it from the base.
-
-Changes to the file system will only be made in the overlay.
-
-Any attempt to modify a file found only in the base will copy the file to the
-overlay layer before modification (including opening a file with a writable
-handle).
-
-Removing and renaming files present only in the base layer is not currently
-permitted. If a file is present in the base layer and the overlay, only the
-overlay will be removed/renamed.
-
-```go
-base := afero.NewOsFs()
-roBase := afero.NewReadOnlyFs(base)
-ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
-
-fh, _ := ufs.Create("/home/test/file2.txt")
-fh.WriteString("This is a test")
-fh.Close()
-```
-
-In this example all write operations will only occur in memory (MemMapFs),
-leaving the base filesystem (OsFs) untouched.
-
-
-## Desired/possible backends
-
-The following is a short list of possible backends we hope someone will
-implement:
-
-* SSH
-* ZIP
-* TAR
-* S3
-
-# About the project
-
-## What's in the name
-
-Afero comes from the Latin roots Ad-Facere.
-
-**"Ad"** is a prefix meaning "to".
-
-**"Facere"** is a form of the root "faciō", meaning "to make or do".
-
-The literal meaning of afero is "to make" or "to do", which seems very fitting
-for a library that allows one to make files and directories and do things with them.
-
-The English word that shares the same roots as Afero is "affair". Affair shares
-the same concept, but as a noun it means "something that is made or done" or "an
-object of a particular type".
-
-It's also nice that, unlike some of my other libraries (hugo, cobra, viper), it
-Googles very well.
- -## Release Notes - -* **0.10.0** 2015.12.10 - * Full compatibility with Windows - * Introduction of afero utilities - * Test suite rewritten to work cross platform - * Normalize paths for MemMapFs - * Adding Sync to the file interface - * **Breaking Change** Walk and ReadDir have changed parameter order - * Moving types used by MemMapFs to a subpackage - * General bugfixes and improvements -* **0.9.0** 2015.11.05 - * New Walk function similar to filepath.Walk - * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC - * MemMapFs.Remove now really deletes the file - * InMemoryFile.Readdir and Readdirnames work correctly - * InMemoryFile functions lock it for concurrent access - * Test suite improvements -* **0.8.0** 2014.10.28 - * First public version - * Interfaces feel ready for people to build using - * Interfaces satisfy all known uses - * MemMapFs passes the majority of the OS test suite - * OsFs passes the majority of the OS test suite - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license. See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index f5b5e127cd..0000000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. 
- Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - //Chmod changes the mode of the named file to mode. - Chmod(name string, mode os.FileMode) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index a633ad500c..0000000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - - go env - - go get -v github.com/spf13/afero/... - - go build github.com/spf13/afero -test_script: -- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 616ff8ff74..0000000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,180 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. 
-type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. - if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return 
&os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go deleted file mode 100644 index 29a26c67dd..0000000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,290 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. 
-type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - 
case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 5728243d96..0000000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 968fc2783e..0000000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index e8108a851e..0000000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,293 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). -// -// Reading directories is currently only supported via Open(), not OpenFile(). 
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. 
-func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod deleted file mode 100644 index 0868550995..0000000000 --- a/vendor/github.com/spf13/afero/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/afero - -require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum deleted file mode 100644 index 6bad37b2a7..0000000000 --- a/vendor/github.com/spf13/afero/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index c42193688c..0000000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index 5c3a3d8fff..0000000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. 
Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) -} - -func TempFile(fs Fs, dir, prefix string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0a7..0000000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index c18a87fb71..0000000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. 
- -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.IndexAny(path, "*?[") >= 0 -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f45..0000000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5b5..0000000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 7af2fb56ff..0000000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" -) - -import "time" - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return 
f.Read(b) -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case 0: - atomic.StoreInt64(&f.at, offset) - case 1: - atomic.AddInt64(&f.at, int64(offset)) - case 2: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 09498e70fb..0000000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - m.mu.Unlock() - - m.Chmod(name, perm|os.ModeDir) - - return nil -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - chmod := false - file, err := m.openWrite(name) - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - m.Chmod(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p, _ := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := 
m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index 13cc1b84c9..0000000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). 
-type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f6b..0000000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index c6376ec373..0000000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,80 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 9d92dbc051..0000000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,214 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, 
nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index eda96312df..0000000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,320 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
- _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) 
- } - - if c <= 0 && len(f.files) == 0 { - return f.files, nil - } - - if f.off >= len(f.files) { - return nil, io.EOF - } - - if c <= 0 { - return f.files[f.off:], nil - } - - if c > len(f.files) { - c = len(f.files) - } - - defer func() { f.off += c }() - return f.files[f.off:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? - if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f481e..0000000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. 
-const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. -func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. 
-func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. -func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. 
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore deleted file mode 100644 index 74a6db472e..0000000000 --- a/vendor/go.opencensus.io/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -/.idea/ - -# go.opencensus.io/exporter/aws -/exporter/aws/ - -# Exclude vendor, use dep ensure after checkout: -/vendor/github.com/ -/vendor/golang.org/ -/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml deleted file mode 100644 index 73c8571c33..0000000000 --- a/vendor/go.opencensus.io/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: go - -go: - # 1.8 is tested by AppVeyor - - 1.11.x - -go_import_path: go.opencensus.io - -# Don't email me the results of the test runs. -notifications: - email: false - -before_script: - - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any - - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any - - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release - - go get github.com/rakyll/embedmd - -script: - - embedmd -d README.md # Ensure embedded code is up-to-date - - go build ./... # Ensure dependency updates don't break build - - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - - go vet ./... - - go test -v -race $PKGS # Run all the tests with the race detector enabled - - GOARCH=386 go test -v $PKGS # Run all tests against a 386 architecture - - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' - - go run internal/check/version.go diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS deleted file mode 100644 index e491a9e7f7..0000000000 --- a/vendor/go.opencensus.io/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md deleted file mode 100644 index 3f3aed3961..0000000000 --- a/vendor/go.opencensus.io/CONTRIBUTING.md +++ /dev/null @@ -1,56 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. 
- -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ - -## Instructions - -Fork the repo, checkout the upstream repo to your GOPATH by: - -``` -$ go get -d go.opencensus.io -``` - -Add your fork as an origin: - -``` -cd $(go env GOPATH)/src/go.opencensus.io -git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git -``` - -Run tests: - -``` -$ go test ./... -``` - -Checkout a new branch, make modifications and push the branch to your fork: - -``` -$ git checkout -b feature -# edit files -$ git commit -$ git push fork feature -``` - -Open a pull request against the main opencensus-go repo. diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock deleted file mode 100644 index 3be12ac8f2..0000000000 --- a/vendor/go.opencensus.io/Gopkg.lock +++ /dev/null @@ -1,231 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" - name = "git.apache.org/thrift.git" - packages = ["lib/go/thrift"] - pruneopts = "UT" - revision = "6e67faa92827ece022380b211c2caaadd6145bf5" - source = "github.com/apache/thrift" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" - name = "github.com/openzipkin/zipkin-go" - packages = [ - ".", - "idgenerator", - "model", - "propagation", - "reporter", - "reporter/http", - ] - pruneopts = "UT" - revision = "d455a5674050831c1e187644faa4046d653433c2" - version = "v0.1.1" - -[[projects]] - digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = 
"05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - branch = "master" - digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" - -[[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "e21acd801f91da814261b938941d193bb036441a" - -[[projects]] - branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" - -[[projects]] - digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" - version = "v1.14.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "git.apache.org/thrift.git/lib/go/thrift", - "github.com/golang/protobuf/proto", - "github.com/openzipkin/zipkin-go", - "github.com/openzipkin/zipkin-go/model", - "github.com/openzipkin/zipkin-go/reporter", - "github.com/openzipkin/zipkin-go/reporter/http", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "golang.org/x/net/context", - "golang.org/x/net/http2", - "google.golang.org/api/support/bundler", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/grpclog", - "google.golang.org/grpc/metadata", - "google.golang.org/grpc/stats", - "google.golang.org/grpc/status", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml deleted file mode 100644 index a9f3cd68eb..0000000000 --- 
a/vendor/go.opencensus.io/Gopkg.toml +++ /dev/null @@ -1,36 +0,0 @@ -# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" -# to avoid locking to a particular minor version which can cause dep to not be -# able to find a satisfying dependency graph. - -[[constraint]] - branch = "master" - name = "git.apache.org/thrift.git" - source = "github.com/apache/thrift" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.0.0" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go" - version = ">=0.1.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = ">=0.8.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "google.golang.org/api" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.11.3" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE deleted file mode 100644 index 7a4a3ea242..0000000000 --- a/vendor/go.opencensus.io/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md deleted file mode 100644 index 97d66983d3..0000000000 --- a/vendor/go.opencensus.io/README.md +++ /dev/null @@ -1,263 +0,0 @@ -# OpenCensus Libraries for Go - -[![Build Status][travis-image]][travis-url] -[![Windows Build Status][appveyor-image]][appveyor-url] -[![GoDoc][godoc-image]][godoc-url] -[![Gitter chat][gitter-image]][gitter-url] - -OpenCensus Go is a Go implementation of OpenCensus, a toolkit for -collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats and tracing. - -## Installation - -``` -$ go get -u go.opencensus.io -``` - -The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). -The use of vendoring or a dependency management tool is recommended. - -## Prerequisites - -OpenCensus Go libraries require Go 1.8 or later. 
- -## Getting Started - -The easiest way to get started using OpenCensus in your application is to use an existing -integration with your RPC framework: - -* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) -* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) -* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) -* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) -* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) -* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) -* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) -* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) -* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) -* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) - -If you're using a framework not listed here, you could either implement your own middleware for your -framework or use [custom stats](#stats) and [spans](#spans) directly in your application. - -## Exporters - -OpenCensus can export instrumentation data to various backends. -OpenCensus has exporter implementations for the following, users -can implement their own exporters by implementing the exporter interfaces -([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), -[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): - -* [Prometheus][exporter-prom] for stats -* [OpenZipkin][exporter-zipkin] for traces -* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces -* [Jaeger][exporter-jaeger] for traces -* [AWS X-Ray][exporter-xray] for traces -* [Datadog][exporter-datadog] for stats and traces -* [Graphite][exporter-graphite] for stats -* [Honeycomb][exporter-honeycomb] for traces - -## Overview - -![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) - -In a microservices environment, a user request may go through -multiple services until there is a response. OpenCensus allows -you to instrument your services and collect diagnostics data all -through your services end-to-end. - -## Tags - -Tags represent propagated key-value pairs. They are propagated using `context.Context` -in the same process or can be encoded to be transmitted on the wire. Usually, this will -be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` -for gRPC. - -Package `tag` allows adding or modifying tags in the current context. - -[embedmd]:# (internal/readme/tags.go new) -```go -ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Upsert(userIDKey, "cde36753ed"), -) -if err != nil { - log.Fatal(err) -} -``` - -## Stats - -OpenCensus is a low-overhead framework even if instrumentation is always enabled. -In order to be so, it is optimized to make recording of data points fast -and separate from the data aggregation. - -OpenCensus stats collection happens in two stages: - -* Definition of measures and recording of data points -* Definition of views and aggregation of the recorded data - -### Recording - -Measurements are data points associated with a measure. -Recording implicitly tags the set of Measurements with the tags from the -provided context: - -[embedmd]:# (internal/readme/stats.go record) -```go -stats.Record(ctx, videoSize.M(102478)) -``` - -### Views - -Views are how Measures are aggregated. You can think of them as queries over the -set of recorded data points (measurements). - -Views have two parts: the tags to group by and the aggregation type used. 
- -Currently three types of aggregations are supported: -* CountAggregation is used to count the number of times a sample was recorded. -* DistributionAggregation is used to provide a histogram of the values of the samples. -* SumAggregation is used to sum up all sample values. - -[embedmd]:# (internal/readme/stats.go aggs) -```go -distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) -countAgg := view.Count() -sumAgg := view.Sum() -``` - -Here we create a view with the DistributionAggregation over our measure. - -[embedmd]:# (internal/readme/stats.go view) -```go -if err := view.Register(&view.View{ - Name: "example.com/video_size_distribution", - Description: "distribution of processed video size over time", - Measure: videoSize, - Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), -}); err != nil { - log.Fatalf("Failed to register view: %v", err) -} -``` - -Register begins collecting data for the view. Registered views' data will be -exported via the registered exporters. - -## Traces - -A distributed trace tracks the progression of a single user request as -it is handled by the services and processes that make up an application. -Each step is called a span in the trace. Spans include metadata about the step, -including especially the time spent in the step, called the span’s latency. - -Below you see a trace and several spans underneath it. - -![Traces and spans](https://i.imgur.com/7hZwRVj.png) - -### Spans - -Span is the unit step in a trace. Each span has a name, latency, status and -additional metadata. - -Below we are starting a span for a cache read and ending it -when we are done: - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -### Propagation - -Spans can have parents or can be root spans if they don't have any parents. -The current span is propagated in-process and across the network to allow associating -new child spans with the parent. - -In the same process, `context.Context` is used to propagate spans. -`trace.StartSpan` creates a new span as a root if the current context -doesn't contain a span. Or, it creates a child of the span that is -already in current context. The returned context can be used to keep -propagating the newly created span in the current context. - -[embedmd]:# (internal/readme/trace.go startend) -```go -ctx, span := trace.StartSpan(ctx, "cache.Get") -defer span.End() - -// Do work to get from cache. -``` - -Across the network, OpenCensus provides different propagation -methods for different protocols. - -* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). -* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) - by default but can be configured to use a custom propagation method by setting another - [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). - -## Execution Tracer - -With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. -See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) -for an example of their mutual use. - -## Profiles - -OpenCensus tags can be applied as profiler labels -for users who are on Go 1.9 and above. 
- -[embedmd]:# (internal/readme/tags.go profiler) -```go -ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Insert(userIDKey, "fff0989878"), -) -if err != nil { - log.Fatal(err) -} -tag.Do(ctx, func(ctx context.Context) { - // Do work. - // When profiling is on, samples will be - // recorded with the key/values from the tag map. -}) -``` - -A screenshot of the CPU profile from the program above: - -![CPU profile](https://i.imgur.com/jBKjlkw.png) - -## Deprecation Policy - -Before version 1.0.0, the following deprecation policy will be observed: - -No backwards-incompatible changes will be made except for the removal of symbols that have -been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release -removing the *Deprecated* functionality will be made no sooner than 28 days after the first -release in which the functionality was marked *Deprecated*. - -[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master -[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go -[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true -[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master -[godoc-image]: https://godoc.org/go.opencensus.io?status.svg -[godoc-url]: https://godoc.org/go.opencensus.io -[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg -[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge - - -[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap -[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace - -[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus -[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver -[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger -[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws -[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog -[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite -[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml deleted file mode 100644 index 98057888a3..0000000000 --- a/vendor/go.opencensus.io/appveyor.yml +++ /dev/null @@ -1,24 +0,0 @@ -version: "{build}" - -platform: x64 - -clone_folder: c:\gopath\src\go.opencensus.io - -environment: - GOPATH: 'c:\gopath' - GOVERSION: '1.11' - GO111MODULE: 'on' - CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 - -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - -build: false -deploy: false - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go build -v .\... - - go test -v .\... # No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/exemplar/exemplar.go b/vendor/go.opencensus.io/exemplar/exemplar.go deleted file mode 100644 index e676df837f..0000000000 --- a/vendor/go.opencensus.io/exemplar/exemplar.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package exemplar implements support for exemplars. Exemplars are additional -// data associated with each measurement. -// -// Their purpose it to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -package exemplar - -import ( - "context" - "time" -) - -const ( - KeyTraceID = "trace_id" - KeySpanID = "span_id" - KeyPrefixTag = "tag:" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -// The map should only be mutated from AttachmentExtractor functions. -type Attachments map[string]string - -// AttachmentExtractor is a function capable of extracting exemplar attachments -// from the context used to record measurements. -// The map passed to the function should be mutated and returned. It will -// initially be nil: the first AttachmentExtractor that would like to add keys to the -// map is responsible for initializing it. -type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments - -var extractors []AttachmentExtractor - -// RegisterAttachmentExtractor registers the given extractor associated with the exemplar -// type name. -// -// Extractors will be used to attempt to extract exemplars from the context -// associated with each recorded measurement. -// -// Packages that support exemplars should register their extractor functions on -// initialization. -// -// RegisterAttachmentExtractor should not be called after any measurements have -// been recorded. -func RegisterAttachmentExtractor(e AttachmentExtractor) { - extractors = append(extractors, e) -} - -// NewFromContext extracts exemplars from the given context. -// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an -// unspecified order to add attachments to the exemplar. 
-func AttachmentsFromContext(ctx context.Context) Attachments { - var a Attachments - for _, extractor := range extractors { - a = extractor(ctx, a) - } - return a -} diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod deleted file mode 100644 index 1236f4c2f3..0000000000 --- a/vendor/go.opencensus.io/go.mod +++ /dev/null @@ -1,25 +0,0 @@ -module go.opencensus.io - -require ( - git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 - github.com/ghodss/yaml v1.0.0 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/protobuf v1.2.0 - github.com/google/go-cmp v0.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.5.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/openzipkin/zipkin-go v0.1.1 - github.com/prometheus/client_golang v0.8.0 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e - github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 - golang.org/x/net v0.0.0-20180906233101-161cd47e91fd - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f - golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e - golang.org/x/text v0.3.0 - google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf - google.golang.org/genproto v0.0.0-20180831171423-11092d34479b - google.golang.org/grpc v1.14.0 - gopkg.in/yaml.v2 v2.2.1 // indirect -) diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum deleted file mode 100644 index 3e0bab8845..0000000000 --- a/vendor/go.opencensus.io/go.sum +++ /dev/null @@ -1,48 +0,0 @@ -git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= -github.com/openzipkin/zipkin-go v0.1.1/go.mod 
h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go deleted file mode 100644 index e1d1238d01..0000000000 --- a/vendor/go.opencensus.io/internal/internal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opencensus.io/internal" - -import ( - "fmt" - "time" - - "go.opencensus.io" -) - -// UserAgent is the user agent to be added to the outgoing -// requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version()) - -// MonotonicEndTime returns the end time at present -// but offset from start, monotonically. -// -// The monotonic clock is used in subtractions hence -// the duration since start added back to start gives -// end as a monotonic time. -// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks -func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Now().Sub(start)) -} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go deleted file mode 100644 index de8ccf236c..0000000000 --- a/vendor/go.opencensus.io/internal/sanitize.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "strings" - "unicode" -) - -const labelKeySizeLimit = 100 - -// Sanitize returns a string that is trunacated to 100 characters if it's too -// long, and replaces non-alphanumeric characters to underscores. -func Sanitize(s string) string { - if len(s) == 0 { - return s - } - if len(s) > labelKeySizeLimit { - s = s[:labelKeySizeLimit] - } - s = strings.Map(sanitizeRune, s) - if unicode.IsDigit(rune(s[0])) { - s = "key_" + s - } - if s[0] == '_' { - s = "key" + s - } - return s -} - -// converts anything that is not a letter or digit to an underscore -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - // Everything else turns into an underscore - return '_' -} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go deleted file mode 100644 index 3b1af8b4b8..0000000000 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package tagencoding contains the tag encoding -// used interally by the stats collector. -package tagencoding // import "go.opencensus.io/internal/tagencoding" - -type Values struct { - Buffer []byte - WriteIndex int - ReadIndex int -} - -func (vb *Values) growIfRequired(expected int) { - if len(vb.Buffer)-vb.WriteIndex < expected { - tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) - copy(tmp, vb.Buffer) - vb.Buffer = tmp - } -} - -func (vb *Values) WriteValue(v []byte) { - length := len(v) & 0xff - vb.growIfRequired(1 + length) - - // writing length of v - vb.Buffer[vb.WriteIndex] = byte(length) - vb.WriteIndex++ - - if length == 0 { - // No value was encoded for this key - return - } - - // writing v - copy(vb.Buffer[vb.WriteIndex:], v[:length]) - vb.WriteIndex += length -} - -// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. -func (vb *Values) ReadValue() []byte { - // read length of v - length := int(vb.Buffer[vb.ReadIndex]) - vb.ReadIndex++ - if length == 0 { - // No value was encoded for this key - return nil - } - - // read value of v - v := make([]byte, length) - endIdx := vb.ReadIndex + length - copy(v, vb.Buffer[vb.ReadIndex:endIdx]) - vb.ReadIndex = endIdx - return v -} - -func (vb *Values) Bytes() []byte { - return vb.Buffer[:vb.WriteIndex] -} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go deleted file mode 100644 index 553ca68dc4..0000000000 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "time" -) - -// Trace allows internal access to some trace functionality. -// TODO(#412): remove this -var Trace interface{} - -var LocalSpanStoreEnabled bool - -// BucketConfiguration stores the number of samples to store for span buckets -// for successful and failed spans for a particular span name. -type BucketConfiguration struct { - Name string - MaxRequestsSucceeded int - MaxRequestsErrors int -} - -// PerMethodSummary is a summary of the spans stored for a single span name. -type PerMethodSummary struct { - Active int - LatencyBuckets []LatencyBucketSummary - ErrorBuckets []ErrorBucketSummary -} - -// LatencyBucketSummary is a summary of a latency bucket. -type LatencyBucketSummary struct { - MinLatency, MaxLatency time.Duration - Size int -} - -// ErrorBucketSummary is a summary of an error bucket. 
-type ErrorBucketSummary struct { - ErrorCode int32 - Size int -} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go deleted file mode 100644 index 62f03486a2..0000000000 --- a/vendor/go.opencensus.io/opencensus.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package opencensus contains Go support for OpenCensus. -package opencensus // import "go.opencensus.io" - -// Version is the current release version of OpenCensus in use. -func Version() string { - return "0.18.0" -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go deleted file mode 100644 index da815b2a73..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "net/http" - "net/http/httptrace" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Transport is an http.RoundTripper that instruments all outgoing requests with -// OpenCensus stats and tracing. -// -// The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation, since the default -// for this may change. -type Transport struct { - // Base may be set to wrap another http.RoundTripper that does the actual - // requests. By default http.DefaultTransport is used. - // - // If base HTTP roundtripper implements CancelRequest, - // the returned round tripper will be cancelable. - Base http.RoundTripper - - // Propagation defines how traces are propagated. If unspecified, a default - // (currently B3 format) will be used. - Propagation propagation.HTTPFormat - - // StartOptions are applied to the span started by this Transport around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows to set start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // NameFromRequest holds the function to use for generating the span name - // from the information found in the outgoing HTTP Request. By default the - // name equals the URL Path. 
- FormatSpanName func(*http.Request) string - - // NewClientTrace may be set to a function allowing the current *trace.Span - // to be annotated with HTTP request event information emitted by the - // httptrace package. - NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace - - // TODO: Implement tag propagation for HTTP. -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.base() - if isHealthEndpoint(req.URL.Path) { - return rt.RoundTrip(req) - } - // TODO: remove excessive nesting of http.RoundTrippers here. - format := t.Propagation - if format == nil { - format = defaultFormat - } - spanNameFormatter := t.FormatSpanName - if spanNameFormatter == nil { - spanNameFormatter = spanNameFromURL - } - - startOpts := t.StartOptions - if t.GetStartOptions != nil { - startOpts = t.GetStartOptions(req) - } - - rt = &traceTransport{ - base: rt, - format: format, - startOptions: trace.StartOptions{ - Sampler: startOpts.Sampler, - SpanKind: trace.SpanKindClient, - }, - formatSpanName: spanNameFormatter, - newClientTrace: t.NewClientTrace, - } - rt = statsTransport{base: rt} - return rt.RoundTrip(req) -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - cr.CancelRequest(req) - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go deleted file mode 100644 index 066ebb87f8..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. -type statsTransport struct { - base http.RoundTripper -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. -func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { - ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.URL.Host), - tag.Upsert(Host, req.URL.Host), - tag.Upsert(KeyClientPath, req.URL.Path), - tag.Upsert(Path, req.URL.Path), - tag.Upsert(KeyClientMethod, req.Method), - tag.Upsert(Method, req.Method)) - req = req.WithContext(ctx) - track := &tracker{ - start: time.Now(), - ctx: ctx, - } - if req.Body == nil { - // TODO: Handle cases where ContentLength is not set. 
- track.reqSize = -1 - } else if req.ContentLength > 0 { - track.reqSize = req.ContentLength - } - stats.Record(ctx, ClientRequestCount.M(1)) - - // Perform request. - resp, err := t.base.RoundTrip(req) - - if err != nil { - track.statusCode = http.StatusInternalServerError - track.end() - } else { - track.statusCode = resp.StatusCode - if resp.Body == nil { - track.end() - } else { - track.body = resp.Body - resp.Body = track - } - } - return resp, err -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t statsTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -type tracker struct { - ctx context.Context - respSize int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once -} - -var _ io.ReadCloser = (*tracker)(nil) - -func (t *tracker) end() { - t.endOnce.Do(func() { - latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) - m := []stats.Measurement{ - ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(t.respSize), - ClientRoundtripLatency.M(latencyMs), - ClientLatency.M(latencyMs), - ClientResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ClientRequestBytes.M(t.reqSize)) - } - - stats.RecordWithTags(t.ctx, []tag.Mutator{ - tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), - tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), - }, m...) - }) -} - -func (t *tracker) Read(b []byte) (int, error) { - n, err := t.body.Read(b) - switch err { - case nil: - t.respSize += int64(n) - return n, nil - case io.EOF: - t.end() - } - return n, err -} - -func (t *tracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. - t.end() - return t.body.Close() -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go deleted file mode 100644 index 10e626b16e..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ochttp provides OpenCensus instrumentation for net/http package. -// -// For server instrumentation, see Handler. For client-side instrumentation, -// see Transport. -package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go deleted file mode 100644 index f777772ec9..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b3 contains a propagation.HTTPFormat implementation -// for B3 propagation. See https://github.com/openzipkin/b3-propagation -// for more details. -package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" - -import ( - "encoding/hex" - "net/http" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// B3 headers that OpenCensus understands. -const ( - TraceIDHeader = "X-B3-TraceId" - SpanIDHeader = "X-B3-SpanId" - SampledHeader = "X-B3-Sampled" -) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers in B3 propagation format. -// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers -// because there are additional fields not represented in the -// OpenCensus span context. Spans created from the incoming -// header will be the direct children of the client-side span. -// Similarly, reciever of the outgoing spans should use client-side -// span created by OpenCensus as the parent. -type HTTPFormat struct{} - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// SpanContextFromRequest extracts a B3 span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) - return trace.SpanContext{ - TraceID: tid, - SpanID: sid, - TraceOptions: sampled, - }, true -} - -// ParseTraceID parses the value of the X-B3-TraceId header. -func ParseTraceID(tid string) (trace.TraceID, bool) { - if tid == "" { - return trace.TraceID{}, false - } - b, err := hex.DecodeString(tid) - if err != nil { - return trace.TraceID{}, false - } - var traceID trace.TraceID - if len(b) <= 8 { - // The lower 64-bits. - start := 8 + (8 - len(b)) - copy(traceID[start:], b) - } else { - start := 16 - len(b) - copy(traceID[start:], b) - } - - return traceID, true -} - -// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. -func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { - if sid == "" { - return trace.SpanID{}, false - } - b, err := hex.DecodeString(sid) - if err != nil { - return trace.SpanID{}, false - } - start := 8 - len(b) - copy(spanID[start:], b) - return spanID, true -} - -// ParseSampled parses the value of the X-B3-Sampled header. -func ParseSampled(sampled string) (trace.TraceOptions, bool) { - switch sampled { - case "true", "1": - return trace.TraceOptions(1), true - default: - return trace.TraceOptions(0), false - } -} - -// SpanContextToRequest modifies the given request to include B3 headers. 
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) - req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) - - var sampled string - if sc.IsSampled() { - sampled = "1" - } else { - sampled = "0" - } - req.Header.Set(SampledHeader, sampled) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go deleted file mode 100644 index 65ab1e9966..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tracecontext contains HTTP propagator for TraceContext standard. -// See https://github.com/w3c/distributed-tracing for more information. -package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - -import ( - "encoding/hex" - "fmt" - "net/http" - "net/textproto" - "regexp" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" - "go.opencensus.io/trace/tracestate" -) - -const ( - supportedVersion = 0 - maxVersion = 254 - maxTracestateLen = 512 - traceparentHeader = "traceparent" - tracestateHeader = "tracestate" - trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` -) - -var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// HTTPFormat implements the TraceContext trace propagation format. -type HTTPFormat struct{} - -// SpanContextFromRequest extracts a span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h, ok := getRequestHeader(req, traceparentHeader, false) - if !ok { - return trace.SpanContext{}, false - } - sections := strings.Split(h, "-") - if len(sections) < 4 { - return trace.SpanContext{}, false - } - - if len(sections[0]) != 2 { - return trace.SpanContext{}, false - } - ver, err := hex.DecodeString(sections[0]) - if err != nil { - return trace.SpanContext{}, false - } - version := int(ver[0]) - if version > maxVersion { - return trace.SpanContext{}, false - } - - if version == 0 && len(sections) != 4 { - return trace.SpanContext{}, false - } - - if len(sections[1]) != 32 { - return trace.SpanContext{}, false - } - tid, err := hex.DecodeString(sections[1]) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], tid) - - if len(sections[2]) != 16 { - return trace.SpanContext{}, false - } - sid, err := hex.DecodeString(sections[2]) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.SpanID[:], sid) - - opts, err := hex.DecodeString(sections[3]) - if err != nil || len(opts) < 1 { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(opts[0]) - - // Don't allow all zero trace or span ID. 
- if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { - return trace.SpanContext{}, false - } - - sc.Tracestate = tracestateFromRequest(req) - return sc, true -} - -// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. -// If commaSeparated is true, multiple header fields with the same field name using be -// combined using ",". -// If no header was found using the given name, "ok" would be false. -// If more than one headers was found using the given name, while commaSeparated is false, -// "ok" would be false. -func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { - v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] - switch len(v) { - case 0: - return "", false - case 1: - return v[0], true - default: - return strings.Join(v, ","), commaSeparated - } -} - -// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. -// Revisit to return additional boolean value to indicate parsing error when following issues -// are resolved. -// https://github.com/w3c/distributed-tracing/issues/172 -// https://github.com/w3c/distributed-tracing/issues/175 -func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { - h, _ := getRequestHeader(req, tracestateHeader, true) - if h == "" { - return nil - } - - var entries []tracestate.Entry - pairs := strings.Split(h, ",") - hdrLenWithoutOWS := len(pairs) - 1 // Number of commas - for _, pair := range pairs { - matches := trimOWSRegExp.FindStringSubmatch(pair) - if matches == nil { - return nil - } - pair = matches[1] - hdrLenWithoutOWS += len(pair) - if hdrLenWithoutOWS > maxTracestateLen { - return nil - } - kv := strings.Split(pair, "=") - if len(kv) != 2 { - return nil - } - entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) - } - ts, err := tracestate.New(nil, entries...) - if err != nil { - return nil - } - - return ts -} - -func tracestateToRequest(sc trace.SpanContext, req *http.Request) { - var pairs = make([]string, 0, len(sc.Tracestate.Entries())) - if sc.Tracestate != nil { - for _, entry := range sc.Tracestate.Entries() { - pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) - } - h := strings.Join(pairs, ",") - - if h != "" && len(h) <= maxTracestateLen { - req.Header.Set(tracestateHeader, h) - } - } -} - -// SpanContextToRequest modifies the given request to include traceparent and tracestate headers. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - h := fmt.Sprintf("%x-%x-%x-%x", - []byte{supportedVersion}, - sc.TraceID[:], - sc.SpanID[:], - []byte{byte(sc.TraceOptions)}) - req.Header.Set(traceparentHeader, h) - tracestateToRequest(sc, req) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go deleted file mode 100644 index dbe22d5861..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/route.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "net/http" - - "go.opencensus.io/tag" -) - -// WithRouteTag returns an http.Handler that records stats with the -// http_server_route tag set to the given value. -func WithRouteTag(handler http.Handler, route string) http.Handler { - return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { - addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} - ctx, _ := tag.New(r.Context(), addRoute...) - r = r.WithContext(ctx) - handler.ServeHTTP(w, r) - return addRoute - }) -} - -// taggedHandlerFunc is a http.Handler that returns tags describing the -// processing of the request. These tags will be recorded along with the -// measures in this package at the end of the request. -type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator - -func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { - tags := h(w, r) - if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tags...) - } -} - -type addedTagsKey struct{} - -type addedTags struct { - t []tag.Mutator -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go deleted file mode 100644 index ff72de97a8..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Handler is an http.Handler wrapper to instrument your HTTP server with -// OpenCensus. It supports both stats and tracing. -// -// Tracing -// -// This handler is aware of the incoming request's span, reading it from request -// headers as configured using the Propagation field. -// The extracted span can be accessed from the incoming request's -// context. -// -// span := trace.FromContext(r.Context()) -// -// The server span will be automatically ended at the end of ServeHTTP. -type Handler struct { - // Propagation defines how traces are propagated. If unspecified, - // B3 propagation will be used. - Propagation propagation.HTTPFormat - - // Handler is the handler used to handle the incoming request. - Handler http.Handler - - // StartOptions are applied to the span started by this Handler around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows to set start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) - // servers. 
If true, any trace metadata set on the incoming request will - // be added as a linked trace instead of being added as a parent of the - // current trace. - IsPublicEndpoint bool - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the incoming HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var tags addedTags - r, traceEnd := h.startTrace(w, r) - defer traceEnd() - w, statsEnd := h.startStats(w, r) - defer statsEnd(&tags) - handler := h.Handler - if handler == nil { - handler = http.DefaultServeMux - } - r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) - handler.ServeHTTP(w, r) -} - -func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if isHealthEndpoint(r.URL.Path) { - return r, func() {} - } - var name string - if h.FormatSpanName == nil { - name = spanNameFromURL(r) - } else { - name = h.FormatSpanName(r) - } - ctx := r.Context() - - startOpts := h.StartOptions - if h.GetStartOptions != nil { - startOpts = h.GetStartOptions(r) - } - - var span *trace.Span - sc, ok := h.extractSpanContext(r) - if ok && !h.IsPublicEndpoint { - ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer)) - } else { - ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer), - ) - if ok { - span.AddLink(trace.Link{ - TraceID: sc.TraceID, - SpanID: sc.SpanID, - Type: trace.LinkTypeChild, - Attributes: nil, - }) - } - } - span.AddAttributes(requestAttrs(r)...) - return r.WithContext(ctx), span.End -} - -func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { - if h.Propagation == nil { - return defaultFormat.SpanContextFromRequest(r) - } - return h.Propagation.SpanContextFromRequest(r) -} - -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { - ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.URL.Host), - tag.Upsert(Path, r.URL.Path), - tag.Upsert(Method, r.Method)) - track := &trackingResponseWriter{ - start: time.Now(), - ctx: ctx, - writer: w, - } - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. 
- track.reqSize = -1 - } else if r.ContentLength > 0 { - track.reqSize = r.ContentLength - } - stats.Record(ctx, ServerRequestCount.M(1)) - return track.wrappedResponseWriter(), track.end -} - -type trackingResponseWriter struct { - ctx context.Context - reqSize int64 - respSize int64 - start time.Time - statusCode int - statusLine string - endOnce sync.Once - writer http.ResponseWriter -} - -// Compile time assertion for ResponseWriter interface -var _ http.ResponseWriter = (*trackingResponseWriter)(nil) - -var logTagsErrorOnce sync.Once - -func (t *trackingResponseWriter) end(tags *addedTags) { - t.endOnce.Do(func() { - if t.statusCode == 0 { - t.statusCode = 200 - } - - span := trace.FromContext(t.ctx) - span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) - span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) - - m := []stats.Measurement{ - ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), - ServerResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ServerRequestBytes.M(t.reqSize)) - } - allTags := make([]tag.Mutator, len(tags.t)+1) - allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) - copy(allTags[1:], tags.t) - stats.RecordWithTags(t.ctx, allTags, m...) - }) -} - -func (t *trackingResponseWriter) Header() http.Header { - return t.writer.Header() -} - -func (t *trackingResponseWriter) Write(data []byte) (int, error) { - n, err := t.writer.Write(data) - t.respSize += int64(n) - return n, err -} - -func (t *trackingResponseWriter) WriteHeader(statusCode int) { - t.writer.WriteHeader(statusCode) - t.statusCode = statusCode - t.statusLine = http.StatusText(t.statusCode) -} - -// wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional -// interfaces as the original. -// This implementation is based on https://github.com/felixge/httpsnoop. 
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = t.writer.(http.Hijacker) - cn, i1 = t.writer.(http.CloseNotifier) - pu, i2 = t.writer.(http.Pusher) - fl, i3 = t.writer.(http.Flusher) - rf, i4 = t.writer.(io.ReaderFrom) - ) - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{t} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{t, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{t, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{t, fl, rf} - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - }{t, pu} - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - io.ReaderFrom - }{t, pu, rf} - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - }{t, pu, fl} - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - io.ReaderFrom - }{t, pu, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{t, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{t, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{t, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, cn, fl, rf} - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{t, cn, pu} - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, cn, pu, rf} - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - }{t, cn, pu, fl} - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, cn, pu, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{t, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{t, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{t, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{t, hj, fl, rf} - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{t, hj, pu} - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - io.ReaderFrom - }{t, hj, pu, rf} - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - }{t, hj, pu, fl} - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, pu, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{t, hj, cn} - case i0 && i1 && !i2 && !i3 && i4: - return struct { - 
http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{t, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{t, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, hj, cn, fl, rf} - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - }{t, hj, cn, pu} - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, hj, cn, pu, rf} - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - }{t, hj, cn, pu, fl} - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, cn, pu, fl, rf} - default: - return struct { - http.ResponseWriter - }{t} - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go deleted file mode 100644 index 05c6c56cc7..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "strings" - - "go.opencensus.io/trace" -) - -type spanAnnotator struct { - sp *trace.Span -} - -// TODO: Remove NewSpanAnnotator at the next release. - -// NewSpanAnnotator returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. -// Deprecated: Use NewSpanAnnotatingClientTrace instead -func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { - return NewSpanAnnotatingClientTrace(r, s) -} - -// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. 
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { - sa := spanAnnotator{sp: s} - - return &httptrace.ClientTrace{ - GetConn: sa.getConn, - GotConn: sa.gotConn, - PutIdleConn: sa.putIdleConn, - GotFirstResponseByte: sa.gotFirstResponseByte, - Got100Continue: sa.got100Continue, - DNSStart: sa.dnsStart, - DNSDone: sa.dnsDone, - ConnectStart: sa.connectStart, - ConnectDone: sa.connectDone, - TLSHandshakeStart: sa.tlsHandshakeStart, - TLSHandshakeDone: sa.tlsHandshakeDone, - WroteHeaders: sa.wroteHeaders, - Wait100Continue: sa.wait100Continue, - WroteRequest: sa.wroteRequest, - } -} - -func (s spanAnnotator) getConn(hostPort string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.get_connection.host_port", hostPort), - } - s.sp.Annotate(attrs, "GetConn") -} - -func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { - attrs := []trace.Attribute{ - trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), - trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), - } - if info.WasIdle { - attrs = append(attrs, - trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) - } - s.sp.Annotate(attrs, "GotConn") -} - -// PutIdleConn implements a httptrace.ClientTrace hook -func (s spanAnnotator) putIdleConn(err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) - } - s.sp.Annotate(attrs, "PutIdleConn") -} - -func (s spanAnnotator) gotFirstResponseByte() { - s.sp.Annotate(nil, "GotFirstResponseByte") -} - -func (s spanAnnotator) got100Continue() { - s.sp.Annotate(nil, "Got100Continue") -} - -func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_start.host", info.Host), - } - s.sp.Annotate(attrs, "DNSStart") -} - -func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { - var addrs []string - for _, addr := range info.Addrs { - addrs = append(addrs, addr.String()) - } - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), - } - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "DNSDone") -} - -func (s spanAnnotator) connectStart(network, addr string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_start.network", network), - trace.StringAttribute("httptrace.connect_start.addr", addr), - } - s.sp.Annotate(attrs, "ConnectStart") -} - -func (s spanAnnotator) connectDone(network, addr string, err error) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_done.network", network), - trace.StringAttribute("httptrace.connect_done.addr", addr), - } - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.connect_done.error", err.Error())) - } - s.sp.Annotate(attrs, "ConnectDone") -} - -func (s spanAnnotator) tlsHandshakeStart() { - s.sp.Annotate(nil, "TLSHandshakeStart") -} - -func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) - } - s.sp.Annotate(attrs, "TLSHandshakeDone") -} - -func (s spanAnnotator) wroteHeaders() { - s.sp.Annotate(nil, "WroteHeaders") -} - -func (s spanAnnotator) wait100Continue() { - s.sp.Annotate(nil, 
"Wait100Continue") -} - -func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { - var attrs []trace.Attribute - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "WroteRequest") -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go deleted file mode 100644 index 46dcc8e57e..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following client HTTP measures are supported for use in custom views. -var ( - // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) -) - -// Client measures supported for use in custom views. -var ( - ClientSentBytes = stats.Int64( - "opencensus.io/http/client/sent_bytes", - "Total bytes sent in request body (not including headers)", - stats.UnitBytes, - ) - ClientReceivedBytes = stats.Int64( - "opencensus.io/http/client/received_bytes", - "Total bytes received in response bodies (not including headers but including error responses with bodies)", - stats.UnitBytes, - ) - ClientRoundtripLatency = stats.Float64( - "opencensus.io/http/client/roundtrip_latency", - "Time between first byte of request headers sent to last byte of response received, or terminal error", - stats.UnitMilliseconds, - ) -) - -// The following server HTTP measures are supported for use in custom views: -var ( - ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) -) - -// The following tags are applied to stats recorded by this package. 
Host, Path -// and Method are applied to all measures. StatusCode is not applied to -// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. -var ( - // Host is the value of the HTTP Host header. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Host, _ = tag.NewKey("http.host") - - // StatusCode is the numeric HTTP response status code, - // or "error" if a transport error occurred and no status code was read. - StatusCode, _ = tag.NewKey("http.status") - - // Path is the URL path (not including query string) in the request. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Path, _ = tag.NewKey("http.path") - - // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method, _ = tag.NewKey("http.method") - - // KeyServerRoute is a low cardinality string representing the logical - // handler of the request. This is usually the pattern registered on the a - // ServeMux (or similar string). - KeyServerRoute, _ = tag.NewKey("http_server_route") -) - -// Client tag keys. -var ( - // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod, _ = tag.NewKey("http_client_method") - // KeyClientPath is the URL path (not including query string). - KeyClientPath, _ = tag.NewKey("http_client_path") - // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. - KeyClientStatus, _ = tag.NewKey("http_client_status") - // KeyClientHost is the value of the request Host header. - KeyClientHost, _ = tag.NewKey("http_client_host") -) - -// Default distributions used by views in this package. -var ( - DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Package ochttp provides some convenience views. -// You still need to register these views for data to actually be collected. 
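
For reference, a minimal sketch of how the measures, tag keys, and default latency buckets removed above were combined into a custom view by consumers of this vendored package (the view name and the main function are illustrative, not taken from this repository):

    package main

    import (
        "log"

        "go.opencensus.io/plugin/ochttp"
        "go.opencensus.io/stats/view"
        "go.opencensus.io/tag"
    )

    func main() {
        // Server latency broken down by HTTP method and logical route, using the
        // measure, tag keys and default latency buckets defined in the file above.
        latencyByRoute := &view.View{
            Name:        "example.com/http/server/latency_by_route", // illustrative name
            Description: "Latency of handled requests, by method and route",
            Measure:     ochttp.ServerLatency,
            TagKeys:     []tag.Key{ochttp.Method, ochttp.KeyServerRoute},
            Aggregation: ochttp.DefaultLatencyDistribution,
        }
        if err := view.Register(latencyByRoute); err != nil {
            log.Fatalf("failed to register view: %v", err)
        }
    }
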
-var ( - ClientSentBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/sent_bytes", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientReceivedBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/received_bytes", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientRoundtripLatencyDistribution = &view.View{ - Name: "opencensus.io/http/client/roundtrip_latency", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - Description: "End-to-end latency, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientCompletedCount = &view.View{ - Name: "opencensus.io/http/client/completed_count", - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - Description: "Count of completed requests, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } -) - -var ( - // Deprecated: No direct replacement, but see ClientCompletedCount. - ClientRequestCountView = &view.View{ - Name: "opencensus.io/http/client/request_count", - Description: "Count of HTTP requests started", - Measure: ClientRequestCount, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientSentBytesDistribution. - ClientRequestBytesView = &view.View{ - Name: "opencensus.io/http/client/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientReceivedBytesDistribution. - ClientResponseBytesView = &view.View{ - Name: "opencensus.io/http/client/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientRoundtripLatencyDistribution. - ClientLatencyView = &view.View{ - Name: "opencensus.io/http/client/latency", - Description: "Latency distribution of HTTP requests", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - } - - // Deprecated: Use ClientCompletedCount. - ClientRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/client/request_count_by_method", - Description: "Client request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ClientSentBytes, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientCompletedCount. 
- ClientResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/client/response_count_by_status_code", - Description: "Client response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } -) - -var ( - ServerRequestCountView = &view.View{ - Name: "opencensus.io/http/server/request_count", - Description: "Count of HTTP requests started", - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerRequestBytesView = &view.View{ - Name: "opencensus.io/http/server/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ServerRequestBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerResponseBytesView = &view.View{ - Name: "opencensus.io/http/server/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ServerResponseBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerLatencyView = &view.View{ - Name: "opencensus.io/http/server/latency", - Description: "Latency distribution of HTTP requests", - Measure: ServerLatency, - Aggregation: DefaultLatencyDistribution, - } - - ServerRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/server/request_count_by_method", - Description: "Server request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/server/response_count_by_status_code", - Description: "Server response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ServerLatency, - Aggregation: view.Count(), - } -) - -// DefaultClientViews are the default client views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultClientViews = []*view.View{ - ClientRequestCountView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientLatencyView, - ClientRequestCountByMethod, - ClientResponseCountByStatusCode, -} - -// DefaultServerViews are the default server views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultServerViews = []*view.View{ - ServerRequestCountView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerLatencyView, - ServerRequestCountByMethod, - ServerResponseCountByStatusCode, -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go deleted file mode 100644 index 819a2d5ff9..0000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "io" - "net/http" - "net/http/httptrace" - - "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// TODO(jbd): Add godoc examples. 
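
Since the DefaultClientViews and DefaultServerViews bundles above are deprecated in favour of registering views individually, a short sketch of that recommended pattern (error handling is illustrative):

    package main

    import (
        "log"

        "go.opencensus.io/plugin/ochttp"
        "go.opencensus.io/stats/view"
    )

    func main() {
        // Register only the non-deprecated client views individually rather than
        // the deprecated DefaultClientViews bundle defined above.
        err := view.Register(
            ochttp.ClientRoundtripLatencyDistribution,
            ochttp.ClientCompletedCount,
            ochttp.ClientSentBytesDistribution,
        )
        if err != nil {
            log.Fatalf("failed to register ochttp client views: %v", err)
        }
    }
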
- -var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} - -// Attributes recorded on the span for the requests. -// Only trace exporters will need them. -const ( - HostAttribute = "http.host" - MethodAttribute = "http.method" - PathAttribute = "http.path" - UserAgentAttribute = "http.user_agent" - StatusCodeAttribute = "http.status_code" -) - -type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat - formatSpanName func(*http.Request) string - newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace -} - -// TODO(jbd): Add message events for request and response size. - -// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. -// The created span can follow a parent span, if a parent is presented in -// the request's context. -func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := t.formatSpanName(req) - // TODO(jbd): Discuss whether we want to prefix - // outgoing requests with Sent. - ctx, span := trace.StartSpan(req.Context(), name, - trace.WithSampler(t.startOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) - - if t.newClientTrace != nil { - req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) - } else { - req = req.WithContext(ctx) - } - - if t.format != nil { - // SpanContextToRequest will modify its Request argument, which is - // contrary to the contract for http.RoundTripper, so we need to - // pass it a copy of the Request. - // However, the Request struct itself was already copied by - // the WithContext calls above and so we just need to copy the header. - header := make(http.Header) - for k, v := range req.Header { - header[k] = v - } - req.Header = header - t.format.SpanContextToRequest(span.SpanContext(), req) - } - - span.AddAttributes(requestAttrs(req)...) - resp, err := t.base.RoundTrip(req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - span.End() - return resp, err - } - - span.AddAttributes(responseAttrs(resp)...) - span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) - - // span.End() will be invoked after - // a read from resp.Body returns io.EOF or when - // resp.Body.Close() is invoked. - resp.Body = &bodyTracker{rc: resp.Body, span: span} - return resp, err -} - -// bodyTracker wraps a response.Body and invokes -// trace.EndSpan on encountering io.EOF on reading -// the body of the original response. -type bodyTracker struct { - rc io.ReadCloser - span *trace.Span -} - -var _ io.ReadCloser = (*bodyTracker)(nil) - -func (bt *bodyTracker) Read(b []byte) (int, error) { - n, err := bt.rc.Read(b) - - switch err { - case nil: - return n, nil - case io.EOF: - bt.span.End() - default: - // For all other errors, set the span status - bt.span.SetStatus(trace.Status{ - // Code 2 is the error code for Internal server error. - Code: 2, - Message: err.Error(), - }) - } - return n, err -} - -func (bt *bodyTracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. - bt.span.End() - return bt.rc.Close() -} - -// CancelRequest cancels an in-flight request by closing its connection. 
-func (t *traceTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -func spanNameFromURL(req *http.Request) string { - return req.URL.Path -} - -func requestAttrs(r *http.Request) []trace.Attribute { - return []trace.Attribute{ - trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(HostAttribute, r.URL.Host), - trace.StringAttribute(MethodAttribute, r.Method), - trace.StringAttribute(UserAgentAttribute, r.UserAgent()), - } -} - -func responseAttrs(resp *http.Response) []trace.Attribute { - return []trace.Attribute{ - trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), - } -} - -// TraceStatus is a utility to convert the HTTP status code to a trace.Status that -// represents the outcome as closely as possible. -func TraceStatus(httpStatusCode int, statusLine string) trace.Status { - var code int32 - if httpStatusCode < 200 || httpStatusCode >= 400 { - code = trace.StatusCodeUnknown - } - switch httpStatusCode { - case 499: - code = trace.StatusCodeCancelled - case http.StatusBadRequest: - code = trace.StatusCodeInvalidArgument - case http.StatusGatewayTimeout: - code = trace.StatusCodeDeadlineExceeded - case http.StatusNotFound: - code = trace.StatusCodeNotFound - case http.StatusForbidden: - code = trace.StatusCodePermissionDenied - case http.StatusUnauthorized: // 401 is actually unauthenticated. - code = trace.StatusCodeUnauthenticated - case http.StatusTooManyRequests: - code = trace.StatusCodeResourceExhausted - case http.StatusNotImplemented: - code = trace.StatusCodeUnimplemented - case http.StatusServiceUnavailable: - code = trace.StatusCodeUnavailable - case http.StatusOK: - code = trace.StatusCodeOK - } - return trace.Status{Code: code, Message: codeToStr[code]} -} - -var codeToStr = map[int32]string{ - trace.StatusCodeOK: `OK`, - trace.StatusCodeCancelled: `CANCELLED`, - trace.StatusCodeUnknown: `UNKNOWN`, - trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, - trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, - trace.StatusCodeNotFound: `NOT_FOUND`, - trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, - trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, - trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, - trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, - trace.StatusCodeAborted: `ABORTED`, - trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, - trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, - trace.StatusCodeInternal: `INTERNAL`, - trace.StatusCodeUnavailable: `UNAVAILABLE`, - trace.StatusCodeDataLoss: `DATA_LOSS`, - trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, -} - -func isHealthEndpoint(path string) bool { - // Health checking is pretty frequent and - // traces collected for health endpoints - // can be extremely noisy and expensive. - // Disable canonical health checking endpoints - // like /healthz and /_ah/health for now. - if path == "/healthz" || path == "/_ah/health" { - return true - } - return false -} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go deleted file mode 100644 index 00d473ee02..0000000000 --- a/vendor/go.opencensus.io/stats/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
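
The traceTransport above is reached through the package's exported Transport wrapper, which lives in a sibling file of this vendored package and is not shown in this hunk; the sketch below assumes that wrapper and is purely illustrative:

    package main

    import (
        "net/http"

        "go.opencensus.io/plugin/ochttp"
    )

    func main() {
        // ochttp.Transport (defined in a sibling file of this vendored package)
        // routes requests through the traceTransport removed above, producing
        // one client span per outgoing request.
        client := &http.Client{Transport: &ochttp.Transport{Base: http.DefaultTransport}}
        resp, err := client.Get("https://example.com/")
        if err == nil {
            resp.Body.Close() // closing the body ends the span (see bodyTracker above)
        }
    }
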
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package stats contains support for OpenCensus stats recording. - -OpenCensus allows users to create typed measures, record measurements, -aggregate the collected data, and export the aggregated data. - -Measures - -A measure represents a type of data point to be tracked and recorded. -For example, latency, request Mb/s, and response Mb/s are measures -to collect from a server. - -Measure constructors such as Int64 and Float64 automatically -register the measure by the given name. Each registered measure needs -to be unique by name. Measures also have a description and a unit. - -Libraries can define and export measures. Application authors can then -create views and collect and break down measures by the tags they are -interested in. - -Recording measurements - -Measurement is a data point to be collected for a measure. For example, -for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Measurements are created from measures with -the current context. Tags from the current context are recorded with the -measurements if they are any. - -Recorded measurements are dropped immediately if no views are registered for them. -There is usually no need to conditionally enable and disable -recording to reduce cost. Recording of measurements is cheap. - -Libraries can always record measurements, and applications can later decide -on which measurements they want to collect by registering views. This allows -libraries to turn on the instrumentation by default. - -Exemplars - -For a given recorded measurement, the associated exemplar is a diagnostic map -that gives more information about the measurement. - -When aggregated using a Distribution aggregation, an exemplar is kept for each -bucket in the Distribution. This allows you to easily find an example of a -measurement that fell into each bucket. - -For example, if you also use the OpenCensus trace package and you -record a measurement with a context that contains a sampled trace span, -then the trace span will be added to the exemplar associated with the measurement. - -When exported to a supporting back end, you should be able to easily navigate -to example traces that fell into each bucket in the Distribution. - -*/ -package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go deleted file mode 100644 index ed54552054..0000000000 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
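
To make the recording flow described in the stats package documentation above concrete, a minimal sketch of defining a measure and recording against it (the measure name is illustrative):

    package main

    import (
        "context"

        "go.opencensus.io/stats"
    )

    // Illustrative measure; any globally unique, domain-prefixed name works.
    var requestBytes = stats.Int64(
        "example.com/measures/request_bytes",
        "Size of processed requests",
        stats.UnitBytes,
    )

    func main() {
        // Recording is cheap; the measurement is dropped unless a view is
        // registered for this measure, as the package documentation above notes.
        stats.Record(context.Background(), requestBytes.M(1024))
    }
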
-// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "go.opencensus.io/tag" -) - -// DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) - -// SubscriptionReporter reports when a view subscribed with a measure. -var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/stats/internal/validation.go deleted file mode 100644 index b946667f96..0000000000 --- a/vendor/go.opencensus.io/stats/internal/validation.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opencensus.io/stats/internal" - -const ( - MaxNameLength = 255 -) - -func IsPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go deleted file mode 100644 index 64d02b1961..0000000000 --- a/vendor/go.opencensus.io/stats/measure.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "sync" - "sync/atomic" -) - -// Measure represents a single numeric value to be tracked and recorded. -// For example, latency, request bytes, and response bytes could be measures -// to collect from a server. -// -// Measures by themselves have no outside effects. In order to be exported, -// the measure needs to be used in a View. If no Views are defined over a -// measure, there is very little cost in recording it. -type Measure interface { - // Name returns the name of this measure. - // - // Measure names are globally unique (among all libraries linked into your program). - // We recommend prefixing the measure name with a domain name relevant to your - // project or application. - // - // Measure names are never sent over the wire or exported to backends. - // They are only used to create Views. - Name() string - - // Description returns the human-readable description of this measure. - Description() string - - // Unit returns the units for the values this measure takes on. 
- // - // Units are encoded according to the case-sensitive abbreviations from the - // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html - Unit() string -} - -// measureDescriptor is the untyped descriptor associated with each measure. -// Int64Measure and Float64Measure wrap measureDescriptor to provide typed -// recording APIs. -// Two Measures with the same name will have the same measureDescriptor. -type measureDescriptor struct { - subs int32 // access atomically - - name string - description string - unit string -} - -func (m *measureDescriptor) subscribe() { - atomic.StoreInt32(&m.subs, 1) -} - -func (m *measureDescriptor) subscribed() bool { - return atomic.LoadInt32(&m.subs) == 1 -} - -// Name returns the name of the measure. -func (m *measureDescriptor) Name() string { - return m.name -} - -// Description returns the description of the measure. -func (m *measureDescriptor) Description() string { - return m.description -} - -// Unit returns the unit of the measure. -func (m *measureDescriptor) Unit() string { - return m.unit -} - -var ( - mu sync.RWMutex - measures = make(map[string]*measureDescriptor) -) - -func registerMeasureHandle(name, desc, unit string) *measureDescriptor { - mu.Lock() - defer mu.Unlock() - - if stored, ok := measures[name]; ok { - return stored - } - m := &measureDescriptor{ - name: name, - description: desc, - unit: unit, - } - measures[name] = m - return m -} - -// Measurement is the numeric value measured when recording stats. Each measure -// provides methods to create measurements of their kind. For example, Int64Measure -// provides M to convert an int64 into a measurement. -type Measurement struct { - v float64 - m *measureDescriptor -} - -// Value returns the value of the Measurement as a float64. -func (m Measurement) Value() float64 { - return m.v -} - -// Measure returns the Measure from which this Measurement was created. -func (m Measurement) Measure() Measure { - return m.m -} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go deleted file mode 100644 index acedb21c44..0000000000 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Float64Measure is a measure for float64 values. -type Float64Measure struct { - *measureDescriptor -} - -// M creates a new float64 measurement. -// Use Record to record measurements. -func (m *Float64Measure) M(v float64) Measurement { - return Measurement{m: m.measureDescriptor, v: v} -} - -// Float64 creates a new measure for float64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. 
-func Float64(name, description, unit string) *Float64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Float64Measure{mi} -} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go deleted file mode 100644 index c4243ba749..0000000000 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Int64Measure is a measure for int64 values. -type Int64Measure struct { - *measureDescriptor -} - -// M creates a new int64 measurement. -// Use Record to record measurements. -func (m *Int64Measure) M(v int64) Measurement { - return Measurement{m: m.measureDescriptor, v: float64(v)} -} - -// Int64 creates a new measure for int64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Int64(name, description, unit string) *Int64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Int64Measure{mi} -} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go deleted file mode 100644 index 0aced02c30..0000000000 --- a/vendor/go.opencensus.io/stats/record.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "context" - - "go.opencensus.io/exemplar" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - internal.SubscriptionReporter = func(measure string) { - mu.Lock() - measures[measure].subscribe() - mu.Unlock() - } -} - -// Record records one or multiple measurements with the same context at once. -// If there are any tags in the context, measurements will be tagged with them. -func Record(ctx context.Context, ms ...Measurement) { - recorder := internal.DefaultRecorder - if recorder == nil { - return - } - if len(ms) == 0 { - return - } - record := false - for _, m := range ms { - if m.m.subscribed() { - record = true - break - } - } - if !record { - return - } - recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. 
-func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - ctx, err := tag.New(ctx, mutators...) - if err != nil { - return err - } - Record(ctx, ms...) - return nil -} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go deleted file mode 100644 index 6931a5f296..0000000000 --- a/vendor/go.opencensus.io/stats/units.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Units are encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -const ( - UnitNone = "1" // Deprecated: Use UnitDimensionless. - UnitDimensionless = "1" - UnitBytes = "By" - UnitMilliseconds = "ms" -) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go deleted file mode 100644 index b7f169b4a5..0000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -// AggType represents the type of aggregation function used on a View. -type AggType int - -// All available aggregation types. -const ( - AggTypeNone AggType = iota // no aggregation; reserved for future use. - AggTypeCount // the count aggregation, see Count. - AggTypeSum // the sum aggregation, see Sum. - AggTypeDistribution // the distribution aggregation, see Distribution. - AggTypeLastValue // the last value aggregation, see LastValue. -) - -func (t AggType) String() string { - return aggTypeName[t] -} - -var aggTypeName = map[AggType]string{ - AggTypeNone: "None", - AggTypeCount: "Count", - AggTypeSum: "Sum", - AggTypeDistribution: "Distribution", - AggTypeLastValue: "LastValue", -} - -// Aggregation represents a data aggregation method. Use one of the functions: -// Count, Sum, or Distribution to construct an Aggregation. -type Aggregation struct { - Type AggType // Type is the AggType of this Aggregation. - Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. 
- - newData func() AggregationData -} - -var ( - aggCount = &Aggregation{ - Type: AggTypeCount, - newData: func() AggregationData { - return &CountData{} - }, - } - aggSum = &Aggregation{ - Type: AggTypeSum, - newData: func() AggregationData { - return &SumData{} - }, - } -) - -// Count indicates that data collected and aggregated -// with this method will be turned into a count value. -// For example, total number of accepted requests can be -// aggregated by using Count. -func Count() *Aggregation { - return aggCount -} - -// Sum indicates that data collected and aggregated -// with this method will be summed up. -// For example, accumulated request bytes can be aggregated by using -// Sum. -func Sum() *Aggregation { - return aggSum -} - -// Distribution indicates that the desired aggregation is -// a histogram distribution. -// -// An distribution aggregation may contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described -// by the bounds. This defines len(bounds)+1 buckets. -// -// If len(bounds) >= 2 then the boundaries for bucket index i are: -// -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length -// -// If len(bounds) is 0 then there is no histogram associated with the -// distribution. There will be a single bucket with boundaries -// (-infinity, +infinity). -// -// If len(bounds) is 1 then there is no finite buckets, and that single -// element is the common boundary of the overflow and underflow buckets. -func Distribution(bounds ...float64) *Aggregation { - return &Aggregation{ - Type: AggTypeDistribution, - Buckets: bounds, - newData: func() AggregationData { - return newDistributionData(bounds) - }, - } -} - -// LastValue only reports the last value recorded using this -// aggregation. All other measurements will be dropped. -func LastValue() *Aggregation { - return &Aggregation{ - Type: AggTypeLastValue, - newData: func() AggregationData { - return &LastValueData{} - }, - } -} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go deleted file mode 100644 index 960b94601f..0000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "math" - - "go.opencensus.io/exemplar" -) - -// AggregationData represents an aggregated value from a collection. -// They are reported on the view data during exporting. -// Mosts users won't directly access aggregration data. -type AggregationData interface { - isAggregationData() bool - addSample(e *exemplar.Exemplar) - clone() AggregationData - equal(other AggregationData) bool -} - -const epsilon = 1e-9 - -// CountData is the aggregated data for the Count aggregation. -// A count aggregation processes data and counts the recordings. 
-// -// Most users won't directly access count data. -type CountData struct { - Value int64 -} - -func (a *CountData) isAggregationData() bool { return true } - -func (a *CountData) addSample(_ *exemplar.Exemplar) { - a.Value = a.Value + 1 -} - -func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value} -} - -func (a *CountData) equal(other AggregationData) bool { - a2, ok := other.(*CountData) - if !ok { - return false - } - - return a.Value == a2.Value -} - -// SumData is the aggregated data for the Sum aggregation. -// A sum aggregation processes data and sums up the recordings. -// -// Most users won't directly access sum data. -type SumData struct { - Value float64 -} - -func (a *SumData) isAggregationData() bool { return true } - -func (a *SumData) addSample(e *exemplar.Exemplar) { - a.Value += e.Value -} - -func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value} -} - -func (a *SumData) equal(other AggregationData) bool { - a2, ok := other.(*SumData) - if !ok { - return false - } - return math.Pow(a.Value-a2.Value, 2) < epsilon -} - -// DistributionData is the aggregated data for the -// Distribution aggregation. -// -// Most users won't directly access distribution data. -// -// For a distribution with N bounds, the associated DistributionData will have -// N+1 buckets. -type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - // ExemplarsPerBucket is slice the same length as CountPerBucket containing - // an exemplar for the associated bucket, or nil. - ExemplarsPerBucket []*exemplar.Exemplar - bounds []float64 // histogram distribution of the values -} - -func newDistributionData(bounds []float64) *DistributionData { - bucketCount := len(bounds) + 1 - return &DistributionData{ - CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), - bounds: bounds, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, - } -} - -// Sum returns the sum of all samples collected. 
-func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } - -func (a *DistributionData) variance() float64 { - if a.Count <= 1 { - return 0 - } - return a.SumOfSquaredDev / float64(a.Count-1) -} - -func (a *DistributionData) isAggregationData() bool { return true } - -func (a *DistributionData) addSample(e *exemplar.Exemplar) { - f := e.Value - if f < a.Min { - a.Min = f - } - if f > a.Max { - a.Max = f - } - a.Count++ - a.addToBucket(e) - - if a.Count == 1 { - a.Mean = f - return - } - - oldMean := a.Mean - a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) -} - -func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { - var count *int64 - var ex **exemplar.Exemplar - for i, b := range a.bounds { - if e.Value < b { - count = &a.CountPerBucket[i] - ex = &a.ExemplarsPerBucket[i] - break - } - } - if count == nil { - count = &a.CountPerBucket[len(a.bounds)] - ex = &a.ExemplarsPerBucket[len(a.bounds)] - } - *count++ - *ex = maybeRetainExemplar(*ex, e) -} - -func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { - if old == nil { - return cur - } - - // Heuristic to pick the "better" exemplar: first keep the one with a - // sampled trace attachment, if neither have a trace attachment, pick the - // one with more attachments. - _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] - if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { - return cur - } - return old -} - -func (a *DistributionData) clone() AggregationData { - c := *a - c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) - return &c -} - -func (a *DistributionData) equal(other AggregationData) bool { - a2, ok := other.(*DistributionData) - if !ok { - return false - } - if a2 == nil { - return false - } - if len(a.CountPerBucket) != len(a2.CountPerBucket) { - return false - } - for i := range a.CountPerBucket { - if a.CountPerBucket[i] != a2.CountPerBucket[i] { - return false - } - } - return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon -} - -// LastValueData returns the last value recorded for LastValue aggregation. -type LastValueData struct { - Value float64 -} - -func (l *LastValueData) isAggregationData() bool { - return true -} - -func (l *LastValueData) addSample(e *exemplar.Exemplar) { - l.Value = e.Value -} - -func (l *LastValueData) clone() AggregationData { - return &LastValueData{l.Value} -} - -func (l *LastValueData) equal(other AggregationData) bool { - a2, ok := other.(*LastValueData) - if !ok { - return false - } - return l.Value == a2.Value -} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go deleted file mode 100644 index 32415d4859..0000000000 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
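
A small sketch of the bucket layout documented for Distribution above, assuming an illustrative Float64 measure and view name:

    package main

    import (
        "log"

        "go.opencensus.io/stats"
        "go.opencensus.io/stats/view"
    )

    // Illustrative measure for the sketch below.
    var latencyMS = stats.Float64(
        "example.com/measures/latency", "Request latency", stats.UnitMilliseconds)

    func main() {
        // Distribution(100, 200) yields len(bounds)+1 = 3 buckets:
        //   [-inf, 100), [100, 200), [200, +inf)
        // matching the bucket boundaries documented in aggregation.go above.
        err := view.Register(&view.View{
            Name:        "example.com/views/latency_hist", // illustrative name
            Measure:     latencyMS,
            Aggregation: view.Distribution(100, 200),
        })
        if err != nil {
            log.Fatalf("failed to register view: %v", err)
        }
    }
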
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "sort" - - "go.opencensus.io/exemplar" - - "go.opencensus.io/internal/tagencoding" - "go.opencensus.io/tag" -) - -type collector struct { - // signatures holds the aggregations values for each unique tag signature - // (values for all keys) to its aggregator. - signatures map[string]AggregationData - // Aggregation is the description of the aggregation to perform for this - // view. - a *Aggregation -} - -func (c *collector) addSample(s string, e *exemplar.Exemplar) { - aggregator, ok := c.signatures[s] - if !ok { - aggregator = c.a.newData() - c.signatures[s] = aggregator - } - aggregator.addSample(e) -} - -// collectRows returns a snapshot of the collected Row values. -func (c *collector) collectedRows(keys []tag.Key) []*Row { - rows := make([]*Row, 0, len(c.signatures)) - for sig, aggregator := range c.signatures { - tags := decodeTags([]byte(sig), keys) - row := &Row{Tags: tags, Data: aggregator.clone()} - rows = append(rows, row) - } - return rows -} - -func (c *collector) clearRows() { - c.signatures = make(map[string]AggregationData) -} - -// encodeWithKeys encodes the map by using values -// only associated with the keys provided. -func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { - vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), - } - for _, k := range keys { - v, _ := m.Value(k) - vb.WriteValue([]byte(v)) - } - return vb.Bytes() -} - -// decodeTags decodes tags from the buffer and -// orders them by the keys. -func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { - vb := &tagencoding.Values{Buffer: buf} - var tags []tag.Tag - for _, k := range keys { - v := vb.ReadValue() - if v != nil { - tags = append(tags, tag.Tag{Key: k, Value: string(v)}) - } - } - vb.ReadIndex = 0 - sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) - return tags -} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go deleted file mode 100644 index dced225c3d..0000000000 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package view contains support for collecting and exposing aggregates over stats. -// -// In order to collect measurements, views need to be defined and registered. -// A view allows recorded measurements to be filtered and aggregated. -// -// All recorded measurements can be grouped by a list of tags. -// -// OpenCensus provides several aggregation methods: Count, Distribution and Sum. -// -// Count only counts the number of measurement points recorded. -// Distribution provides statistical summary of the aggregated data by counting -// how many recorded measurements fall into each bucket. -// Sum adds up the measurement values. -// LastValue just keeps track of the most recently recorded measurement value. 
-// All aggregations are cumulative. -// -// Views can be registerd and unregistered at any time during program execution. -// -// Libraries can define views but it is recommended that in most cases registering -// views be left up to applications. -// -// Exporting -// -// Collected and aggregated data can be exported to a metric collection -// backend by registering its exporter. -// -// Multiple exporters can be registered to upload the data to various -// different back ends. -package view // import "go.opencensus.io/stats/view" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go deleted file mode 100644 index 7cb59718f5..0000000000 --- a/vendor/go.opencensus.io/stats/view/export.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package view - -import "sync" - -var ( - exportersMu sync.RWMutex // guards exporters - exporters = make(map[Exporter]struct{}) -) - -// Exporter exports the collected records as view data. -// -// The ExportView method should return quickly; if an -// Exporter takes a significant amount of time to -// process a Data, that work should be done on another goroutine. -// -// It is safe to assume that ExportView will not be called concurrently from -// multiple goroutines. -// -// The Data should not be modified. -type Exporter interface { - ExportView(viewData *Data) -} - -// RegisterExporter registers an exporter. -// Collected data will be reported via all the -// registered exporters. Once you no longer -// want data to be exported, invoke UnregisterExporter -// with the previously registered exporter. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - exporters[e] = struct{}{} -} - -// UnregisterExporter unregisters an exporter. -func UnregisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - delete(exporters, e) -} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go deleted file mode 100644 index c2a08af678..0000000000 --- a/vendor/go.opencensus.io/stats/view/view.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
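
A minimal sketch of the Exporter contract defined above; the logExporter type is illustrative, and a real exporter would forward rows to a metrics backend rather than log them:

    package main

    import (
        "log"

        "go.opencensus.io/stats/view"
    )

    // logExporter is an illustrative Exporter that prints each exported Data
    // batch; ExportView should return quickly, as documented above.
    type logExporter struct{}

    func (logExporter) ExportView(d *view.Data) {
        for _, row := range d.Rows {
            log.Printf("%s %v: %v", d.View.Name, row.Tags, row.Data)
        }
    }

    func main() {
        view.RegisterExporter(logExporter{})
        defer view.UnregisterExporter(logExporter{})
    }
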
-// - -package view - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "sync/atomic" - "time" - - "go.opencensus.io/exemplar" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -// View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function to be before data will be -// collected and sent to Exporters. -type View struct { - Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. - Description string // Description is a human-readable description for this view. - - // TagKeys are the tag keys describing the grouping of this view. - // A single Row will be produced for each combination of associated tag values. - TagKeys []tag.Key - - // Measure is a stats.Measure to aggregate in this view. - Measure stats.Measure - - // Aggregation is the aggregation function tp apply to the set of Measurements. - Aggregation *Aggregation -} - -// WithName returns a copy of the View with a new name. This is useful for -// renaming views to cope with limitations placed on metric names by various -// backends. -func (v *View) WithName(name string) *View { - vNew := *v - vNew.Name = name - return &vNew -} - -// same compares two views and returns true if they represent the same aggregation. -func (v *View) same(other *View) bool { - if v == other { - return true - } - if v == nil { - return false - } - return reflect.DeepEqual(v.Aggregation, other.Aggregation) && - v.Measure.Name() == other.Measure.Name() -} - -// canonicalize canonicalizes v by setting explicit -// defaults for Name and Description and sorting the TagKeys -func (v *View) canonicalize() error { - if v.Measure == nil { - return fmt.Errorf("cannot register view %q: measure not set", v.Name) - } - if v.Aggregation == nil { - return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) - } - if v.Name == "" { - v.Name = v.Measure.Name() - } - if v.Description == "" { - v.Description = v.Measure.Description() - } - if err := checkViewName(v.Name); err != nil { - return err - } - sort.Slice(v.TagKeys, func(i, j int) bool { - return v.TagKeys[i].Name() < v.TagKeys[j].Name() - }) - return nil -} - -// viewInternal is the internal representation of a View. -type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector -} - -func newViewInternal(v *View) (*viewInternal, error) { - return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, - }, nil -} - -func (v *viewInternal) subscribe() { - atomic.StoreUint32(&v.subscribed, 1) -} - -func (v *viewInternal) unsubscribe() { - atomic.StoreUint32(&v.subscribed, 0) -} - -// isSubscribed returns true if the view is exporting -// data by subscription. -func (v *viewInternal) isSubscribed() bool { - return atomic.LoadUint32(&v.subscribed) == 1 -} - -func (v *viewInternal) clearRows() { - v.collector.clearRows() -} - -func (v *viewInternal) collectedRows() []*Row { - return v.collector.collectedRows(v.view.TagKeys) -} - -func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { - if !v.isSubscribed() { - return - } - sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, e) -} - -// A Data is a set of rows about usage of the single measure associated -// with the given view. 
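[editor's note: a hedged sketch of the Exporter contract removed above in export.go, together with the Data payload it receives; logExporter is a hypothetical name and the log import is assumed.]

    // A trivial exporter that only logs each reported batch of rows.
    type logExporter struct{}

    func (logExporter) ExportView(d *view.Data) {
        log.Printf("view %q: %d rows between %v and %v", d.View.Name, len(d.Rows), d.Start, d.End)
    }

    // Registered once at startup; every reporting period each registered
    // exporter receives the aggregated rows.
    view.RegisterExporter(logExporter{})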
Each row is specific to a unique set of tags. -type Data struct { - View *View - Start, End time.Time - Rows []*Row -} - -// Row is the collected value for a specific set of key value pairs a.k.a tags. -type Row struct { - Tags []tag.Tag - Data AggregationData -} - -func (r *Row) String() string { - var buffer bytes.Buffer - buffer.WriteString("{ ") - buffer.WriteString("{ ") - for _, t := range r.Tags { - buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) - } - buffer.WriteString(" }") - buffer.WriteString(fmt.Sprintf("%v", r.Data)) - buffer.WriteString(" }") - return buffer.String() -} - -// Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even both rows have the same tags but the tags appear in -// different orders it will return false. -func (r *Row) Equal(other *Row) bool { - if r == other { - return true - } - return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) -} - -func checkViewName(name string) error { - if len(name) > internal.MaxNameLength { - return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) - } - if !internal.IsPrintable(name) { - return fmt.Errorf("view name needs to be an ASCII string") - } - return nil -} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go deleted file mode 100644 index 63b0ee3cc3..0000000000 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "fmt" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - defaultWorker = newWorker() - go defaultWorker.start() - internal.DefaultRecorder = record -} - -type measureRef struct { - measure string - views map[*viewInternal]struct{} -} - -type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - startTimes map[*viewInternal]time.Time - - timer *time.Ticker - c chan command - quit, done chan bool -} - -var defaultWorker *worker - -var defaultReportingDuration = 10 * time.Second - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func Find(name string) (v *View) { - req := &getViewByNameReq{ - name: name, - c: make(chan *getViewByNameResp), - } - defaultWorker.c <- req - resp := <-req.c - return resp.v -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func Register(views ...*View) error { - for _, v := range views { - if err := v.canonicalize(); err != nil { - return err - } - } - req := ®isterViewReq{ - views: views, - err: make(chan error), - } - defaultWorker.c <- req - return <-req.err -} - -// Unregister the given views. Data will not longer be exported for these views -// after Unregister returns. 
-// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func Unregister(views ...*View) { - names := make([]string, len(views)) - for i := range views { - names[i] = views[i].Name - } - req := &unregisterFromViewReq{ - views: names, - done: make(chan struct{}), - } - defaultWorker.c <- req - <-req.done -} - -// RetrieveData gets a snapshot of the data collected for the the view registered -// with the given name. It is intended for testing only. -func RetrieveData(viewName string) ([]*Row, error) { - req := &retrieveDataReq{ - now: time.Now(), - v: viewName, - c: make(chan *retrieveDataResp), - } - defaultWorker.c <- req - resp := <-req.c - return resp.rows, resp.err -} - -func record(tags *tag.Map, ms interface{}, attachments map[string]string) { - req := &recordReq{ - tm: tags, - ms: ms.([]stats.Measurement), - attachments: attachments, - t: time.Now(), - } - defaultWorker.c <- req -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func SetReportingPeriod(d time.Duration) { - // TODO(acetechnologist): ensure that the duration d is more than a certain - // value. e.g. 1s - req := &setReportingPeriodReq{ - d: d, - c: make(chan bool), - } - defaultWorker.c <- req - <-req.c // don't return until the timer is set to the new duration. -} - -func newWorker() *worker { - return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - startTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), - } -} - -func (w *worker) start() { - for { - select { - case cmd := <-w.c: - cmd.handleCommand(w) - case <-w.timer.C: - w.reportUsage(time.Now()) - case <-w.quit: - w.timer.Stop() - close(w.c) - w.done <- true - return - } - } -} - -func (w *worker) stop() { - w.quit <- true - <-w.done -} - -func (w *worker) getMeasureRef(name string) *measureRef { - if mr, ok := w.measures[name]; ok { - return mr - } - mr := &measureRef{ - measure: name, - views: make(map[*viewInternal]struct{}), - } - w.measures[name] = mr - return mr -} - -func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { - vi, err := newViewInternal(v) - if err != nil { - return nil, err - } - if x, ok := w.views[vi.view.Name]; ok { - if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) - } - - // the view is already registered so there is nothing to do and the - // command is considered successful. 
- return x, nil - } - w.views[vi.view.Name] = vi - ref := w.getMeasureRef(vi.view.Measure.Name()) - ref.views[vi] = struct{}{} - return vi, nil -} - -func (w *worker) reportView(v *viewInternal, now time.Time) { - if !v.isSubscribed() { - return - } - rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - viewData := &Data{ - View: v.view, - Start: w.startTimes[v], - End: time.Now(), - Rows: rows, - } - exportersMu.Lock() - for e := range exporters { - e.ExportView(viewData) - } - exportersMu.Unlock() -} - -func (w *worker) reportUsage(now time.Time) { - for _, v := range w.views { - w.reportView(v, now) - } -} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go deleted file mode 100644 index b38f26f424..0000000000 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "errors" - "fmt" - "strings" - "time" - - "go.opencensus.io/exemplar" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -type command interface { - handleCommand(w *worker) -} - -// getViewByNameReq is the command to get a view given its name. -type getViewByNameReq struct { - name string - c chan *getViewByNameResp -} - -type getViewByNameResp struct { - v *View -} - -func (cmd *getViewByNameReq) handleCommand(w *worker) { - v := w.views[cmd.name] - if v == nil { - cmd.c <- &getViewByNameResp{nil} - return - } - cmd.c <- &getViewByNameResp{v.view} -} - -// registerViewReq is the command to register a view. -type registerViewReq struct { - views []*View - err chan error -} - -func (cmd *registerViewReq) handleCommand(w *worker) { - var errstr []string - for _, view := range cmd.views { - vi, err := w.tryRegisterView(view) - if err != nil { - errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) - continue - } - internal.SubscriptionReporter(view.Measure.Name()) - vi.subscribe() - } - if len(errstr) > 0 { - cmd.err <- errors.New(strings.Join(errstr, "\n")) - } else { - cmd.err <- nil - } -} - -// unregisterFromViewReq is the command to unregister to a view. Has no -// impact on the data collection for client that are pulling data from the -// library. -type unregisterFromViewReq struct { - views []string - done chan struct{} -} - -func (cmd *unregisterFromViewReq) handleCommand(w *worker) { - for _, name := range cmd.views { - vi, ok := w.views[name] - if !ok { - continue - } - - // Report pending data for this view before removing it. - w.reportView(vi, time.Now()) - - vi.unsubscribe() - if !vi.isSubscribed() { - // this was the last subscription and view is not collecting anymore. - // The collected data can be cleared. - vi.clearRows() - } - delete(w.views, name) - } - cmd.done <- struct{}{} -} - -// retrieveDataReq is the command to retrieve data for a view. 
-type retrieveDataReq struct { - now time.Time - v string - c chan *retrieveDataResp -} - -type retrieveDataResp struct { - rows []*Row - err error -} - -func (cmd *retrieveDataReq) handleCommand(w *worker) { - vi, ok := w.views[cmd.v] - if !ok { - cmd.c <- &retrieveDataResp{ - nil, - fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), - } - return - } - - if !vi.isSubscribed() { - cmd.c <- &retrieveDataResp{ - nil, - fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), - } - return - } - cmd.c <- &retrieveDataResp{ - vi.collectedRows(), - nil, - } -} - -// recordReq is the command to record data related to multiple measures -// at once. -type recordReq struct { - tm *tag.Map - ms []stats.Measurement - attachments map[string]string - t time.Time -} - -func (cmd *recordReq) handleCommand(w *worker) { - for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not registered - continue - } - ref := w.getMeasureRef(m.Measure().Name()) - for v := range ref.views { - e := &exemplar.Exemplar{ - Value: m.Value(), - Timestamp: cmd.t, - Attachments: cmd.attachments, - } - v.addSample(cmd.tm, e) - } - } -} - -// setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the registered clients. -type setReportingPeriodReq struct { - d time.Duration - c chan bool -} - -func (cmd *setReportingPeriodReq) handleCommand(w *worker) { - w.timer.Stop() - if cmd.d <= 0 { - w.timer = time.NewTicker(defaultReportingDuration) - } else { - w.timer = time.NewTicker(cmd.d) - } - cmd.c <- true -} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go deleted file mode 100644 index dcc13f4987..0000000000 --- a/vendor/go.opencensus.io/tag/context.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "context" - - "go.opencensus.io/exemplar" -) - -// FromContext returns the tag map stored in the context. -func FromContext(ctx context.Context) *Map { - // The returned tag map shouldn't be mutated. - ts := ctx.Value(mapCtxKey) - if ts == nil { - return nil - } - return ts.(*Map) -} - -// NewContext creates a new context with the given tag map. -// To propagate a tag map to downstream methods and downstream RPCs, add a tag map -// to the current context. NewContext will return a copy of the current context, -// and put the tag map into the returned one. -// If there is already a tag map in the current context, it will be replaced with m. 
-func NewContext(ctx context.Context, m *Map) context.Context { - return context.WithValue(ctx, mapCtxKey, m) -} - -type ctxKey struct{} - -var mapCtxKey = ctxKey{} - -func init() { - exemplar.RegisterAttachmentExtractor(extractTagsAttachments) -} - -func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { - m := FromContext(ctx) - if m == nil { - return a - } - if len(m.m) == 0 { - return a - } - if a == nil { - a = make(map[string]string) - } - - for k, v := range m.m { - a[exemplar.KeyPrefixTag+k.Name()] = v - } - return a -} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go deleted file mode 100644 index da16b74e4d..0000000000 --- a/vendor/go.opencensus.io/tag/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package tag contains OpenCensus tags. - -Tags are key-value pairs. Tags provide additional cardinality to -the OpenCensus instrumentation data. - -Tags can be propagated on the wire and in the same -process via context.Context. Encode and Decode should be -used to represent tags into their binary propagation form. -*/ -package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go deleted file mode 100644 index ebbed95000..0000000000 --- a/vendor/go.opencensus.io/tag/key.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -// Key represents a tag key. -type Key struct { - name string -} - -// NewKey creates or retrieves a string key identified by name. -// Calling NewKey consequently with the same name returns the same key. -func NewKey(name string) (Key, error) { - if !checkKeyName(name) { - return Key{}, errInvalidKeyName - } - return Key{name: name}, nil -} - -// Name returns the name of the key. -func (k Key) Name() string { - return k.name -} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go deleted file mode 100644 index 5b72ba6ad3..0000000000 --- a/vendor/go.opencensus.io/tag/map.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "bytes" - "context" - "fmt" - "sort" -) - -// Tag is a key value pair that can be propagated on wire. -type Tag struct { - Key Key - Value string -} - -// Map is a map of tags. Use New to create a context containing -// a new Map. -type Map struct { - m map[Key]string -} - -// Value returns the value for the key if a value for the key exists. -func (m *Map) Value(k Key) (string, bool) { - if m == nil { - return "", false - } - v, ok := m.m[k] - return v, ok -} - -func (m *Map) String() string { - if m == nil { - return "nil" - } - keys := make([]Key, 0, len(m.m)) - for k := range m.m { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) - - var buffer bytes.Buffer - buffer.WriteString("{ ") - for _, k := range keys { - buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) - } - buffer.WriteString(" }") - return buffer.String() -} - -func (m *Map) insert(k Key, v string) { - if _, ok := m.m[k]; ok { - return - } - m.m[k] = v -} - -func (m *Map) update(k Key, v string) { - if _, ok := m.m[k]; ok { - m.m[k] = v - } -} - -func (m *Map) upsert(k Key, v string) { - m.m[k] = v -} - -func (m *Map) delete(k Key) { - delete(m.m, k) -} - -func newMap() *Map { - return &Map{m: make(map[Key]string)} -} - -// Mutator modifies a tag map. -type Mutator interface { - Mutate(t *Map) (*Map, error) -} - -// Insert returns a mutator that inserts a -// value associated with k. If k already exists in the tag map, -// mutator doesn't update the value. -func Insert(k Key, v string) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.insert(k, v) - return m, nil - }, - } -} - -// Update returns a mutator that updates the -// value of the tag associated with k with v. If k doesn't -// exists in the tag map, the mutator doesn't insert the value. -func Update(k Key, v string) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.update(k, v) - return m, nil - }, - } -} - -// Upsert returns a mutator that upserts the -// value of the tag associated with k with v. It inserts the -// value if k doesn't exist already. It mutates the value -// if k already exists. -func Upsert(k Key, v string) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.upsert(k, v) - return m, nil - }, - } -} - -// Delete returns a mutator that deletes -// the value associated with k. -func Delete(k Key) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - m.delete(k) - return m, nil - }, - } -} - -// New returns a new context that contains a tag map -// originated from the incoming context and modified -// with the provided mutators. 
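[editor's note: a small sketch of the mutator pattern documented above, reusing the hypothetical methodKey from the earlier sketch; context and log imports are assumed.]

    // tag.New derives a child context whose tag map carries the mutated values.
    ctx, err := tag.New(context.Background(),
        tag.Insert(methodKey, "GET"),  // set only if the key is not present yet
        tag.Upsert(methodKey, "POST"), // overwrite unconditionally
    )
    if err != nil {
        log.Fatal(err)
    }
    _ = ctx // downstream stats recording picks the tags up from ctx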
-func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { - m := newMap() - orig := FromContext(ctx) - if orig != nil { - for k, v := range orig.m { - if !checkKeyName(k.Name()) { - return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) - } - if !checkValue(v) { - return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) - } - m.insert(k, v) - } - } - var err error - for _, mod := range mutator { - m, err = mod.Mutate(m) - if err != nil { - return ctx, err - } - } - return NewContext(ctx, m), nil -} - -// Do is similar to pprof.Do: a convenience for installing the tags -// from the context as Go profiler labels. This allows you to -// correlated runtime profiling with stats. -// -// It converts the key/values from the given map to Go profiler labels -// and calls pprof.Do. -// -// Do is going to do nothing if your Go version is below 1.9. -func Do(ctx context.Context, f func(ctx context.Context)) { - do(ctx, f) -} - -type mutator struct { - fn func(t *Map) (*Map, error) -} - -func (m *mutator) Mutate(t *Map) (*Map, error) { - return m.fn(t) -} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go deleted file mode 100644 index 3e998950c3..0000000000 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "encoding/binary" - "fmt" -) - -// KeyType defines the types of keys allowed. Currently only keyTypeString is -// supported. -type keyType byte - -const ( - keyTypeString keyType = iota - keyTypeInt64 - keyTypeTrue - keyTypeFalse - - tagsVersionID = byte(0) -) - -type encoderGRPC struct { - buf []byte - writeIdx, readIdx int -} - -// writeKeyString writes the fieldID '0' followed by the key string and value -// string. 
-func (eg *encoderGRPC) writeTagString(k, v string) { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k) - eg.writeStringWithVarintLen(v) -} - -func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { - eg.writeByte(byte(keyTypeInt64)) - eg.writeStringWithVarintLen(k) - eg.writeUint64(i) -} - -func (eg *encoderGRPC) writeTagTrue(k string) { - eg.writeByte(byte(keyTypeTrue)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeTagFalse(k string) { - eg.writeByte(byte(keyTypeFalse)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { - length := len(bytes) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], bytes) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeStringWithVarintLen(s string) { - length := len(s) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], s) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeByte(v byte) { - eg.growIfRequired(1) - eg.buf[eg.writeIdx] = v - eg.writeIdx++ -} - -func (eg *encoderGRPC) writeUint32(i uint32) { - eg.growIfRequired(4) - binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 4 -} - -func (eg *encoderGRPC) writeUint64(i uint64) { - eg.growIfRequired(8) - binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 8 -} - -func (eg *encoderGRPC) readByte() byte { - b := eg.buf[eg.readIdx] - eg.readIdx++ - return b -} - -func (eg *encoderGRPC) readUint32() uint32 { - i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) - eg.readIdx += 4 - return i -} - -func (eg *encoderGRPC) readUint64() uint64 { - i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) - eg.readIdx += 8 - return i -} - -func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { - if eg.readEnded() { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) - if valueStart <= 0 { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - - valueStart += eg.readIdx - valueEnd := valueStart + int(length) - if valueEnd > len(eg.buf) { - return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) - } - - eg.readIdx = valueEnd - return eg.buf[valueStart:valueEnd], nil -} - -func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { - bytes, err := eg.readBytesWithVarintLen() - if err != nil { - return "", err - } - return string(bytes), nil -} - -func (eg *encoderGRPC) growIfRequired(expected int) { - if len(eg.buf)-eg.writeIdx < expected { - tmp := make([]byte, 2*(len(eg.buf)+1)+expected) - copy(tmp, eg.buf) - eg.buf = tmp - } -} - -func (eg *encoderGRPC) readEnded() bool { - return eg.readIdx >= len(eg.buf) -} - -func (eg *encoderGRPC) bytes() []byte { - return eg.buf[:eg.writeIdx] -} - -// Encode encodes the tag map into a []byte. It is useful to propagate -// the tag maps on wire in binary format. 
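[editor's note: a hedged round-trip sketch of the binary codec whose Encode/Decode entry points follow; it reuses the hypothetical ctx and methodKey from the earlier sketches and assumes the tag map in ctx is non-nil.]

    m := tag.FromContext(ctx) // *tag.Map built earlier via tag.New
    wire := tag.Encode(m)     // version byte followed by (key, value) pairs

    decoded, err := tag.Decode(wire)
    if err != nil {
        log.Fatalf("decode failed: %v", err)
    }
    if v, ok := decoded.Value(methodKey); ok {
        log.Printf("method=%s survived the round trip", v)
    }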
-func Encode(m *Map) []byte { - eg := &encoderGRPC{ - buf: make([]byte, len(m.m)), - } - eg.writeByte(byte(tagsVersionID)) - for k, v := range m.m { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v)) - } - return eg.bytes() -} - -// Decode decodes the given []byte into a tag map. -func Decode(bytes []byte) (*Map, error) { - ts := newMap() - err := DecodeEach(bytes, ts.upsert) - if err != nil { - // no partial failures - return nil, err - } - return ts, nil -} - -// DecodeEach decodes the given serialized tag map, calling handler for each -// tag key and value decoded. -func DecodeEach(bytes []byte, fn func(key Key, val string)) error { - eg := &encoderGRPC{ - buf: bytes, - } - if len(eg.buf) == 0 { - return nil - } - - version := eg.readByte() - if version > tagsVersionID { - return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) - } - - for !eg.readEnded() { - typ := keyType(eg.readByte()) - - if typ != keyTypeString { - return fmt.Errorf("cannot decode: invalid key type: %q", typ) - } - - k, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - v, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - key, err := NewKey(string(k)) - if err != nil { - return err - } - val := string(v) - if !checkValue(val) { - return errInvalidValue - } - fn(key, val) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go deleted file mode 100644 index f81cd0b4a7..0000000000 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.9 - -package tag - -import ( - "context" - "runtime/pprof" -) - -func do(ctx context.Context, f func(ctx context.Context)) { - m := FromContext(ctx) - keyvals := make([]string, 0, 2*len(m.m)) - for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v) - } - pprof.Do(ctx, pprof.Labels(keyvals...), f) -} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go deleted file mode 100644 index 83adbce56b..0000000000 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !go1.9 - -package tag - -import "context" - -func do(ctx context.Context, f func(ctx context.Context)) { - f(ctx) -} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go deleted file mode 100644 index 0939fc6748..0000000000 --- a/vendor/go.opencensus.io/tag/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tag - -import "errors" - -const ( - maxKeyLength = 255 - - // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). - validKeyValueMin = 32 - validKeyValueMax = 126 -) - -var ( - errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") - errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") -) - -func checkKeyName(name string) bool { - if len(name) == 0 { - return false - } - if len(name) > maxKeyLength { - return false - } - return isASCII(name) -} - -func isASCII(s string) bool { - for _, c := range s { - if (c < validKeyValueMin) || (c > validKeyValueMax) { - return false - } - } - return true -} - -func checkValue(v string) bool { - if len(v) > maxKeyLength { - return false - } - return isASCII(v) -} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go deleted file mode 100644 index 01f0f90831..0000000000 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "fmt" - "time" -) - -type ( - // TraceID is a 16-byte identifier for a set of spans. - TraceID [16]byte - - // SpanID is an 8-byte identifier for a single span. - SpanID [8]byte -) - -func (t TraceID) String() string { - return fmt.Sprintf("%02x", t[:]) -} - -func (s SpanID) String() string { - return fmt.Sprintf("%02x", s[:]) -} - -// Annotation represents a text annotation with a set of attributes and a timestamp. -type Annotation struct { - Time time.Time - Message string - Attributes map[string]interface{} -} - -// Attribute represents a key-value pair on a span, link or annotation. -// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. -type Attribute struct { - key string - value interface{} -} - -// BoolAttribute returns a bool-valued attribute. 
-func BoolAttribute(key string, value bool) Attribute { - return Attribute{key: key, value: value} -} - -// Int64Attribute returns an int64-valued attribute. -func Int64Attribute(key string, value int64) Attribute { - return Attribute{key: key, value: value} -} - -// StringAttribute returns a string-valued attribute. -func StringAttribute(key string, value string) Attribute { - return Attribute{key: key, value: value} -} - -// LinkType specifies the relationship between the span that had the link -// added, and the linked span. -type LinkType int32 - -// LinkType values. -const ( - LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The current span is a child of the linked span. - LinkTypeParent // The current span is the parent of the linked span. -) - -// Link represents a reference from one span to another span. -type Link struct { - TraceID TraceID - SpanID SpanID - Type LinkType - // Attributes is a set of attributes on the link. - Attributes map[string]interface{} -} - -// MessageEventType specifies the type of message event. -type MessageEventType int32 - -// MessageEventType values. -const ( - MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. - MessageEventTypeSent // Indicates a sent RPC message. - MessageEventTypeRecv // Indicates a received RPC message. -) - -// MessageEvent represents an event describing a message sent or received on the network. -type MessageEvent struct { - Time time.Time - EventType MessageEventType - MessageID int64 - UncompressedByteSize int64 - CompressedByteSize int64 -} - -// Status is the status of a Span. -type Status struct { - // Code is a status code. Zero indicates success. - // - // If Code will be propagated to Google APIs, it ideally should be a value from - // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . - Code int32 - Message string -} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go deleted file mode 100644 index 0816892ea1..0000000000 --- a/vendor/go.opencensus.io/trace/config.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - - "go.opencensus.io/trace/internal" -) - -// Config represents the global tracing configuration. -type Config struct { - // DefaultSampler is the default sampler used when creating new spans. - DefaultSampler Sampler - - // IDGenerator is for internal use only. - IDGenerator internal.IDGenerator -} - -var configWriteMu sync.Mutex - -// ApplyConfig applies changes to the global tracing configuration. -// -// Fields not provided in the given config are going to be preserved. 
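[editor's note: a short, hedged example of the configuration call documented above, lowering the process-wide sampling rate while leaving the ID generator untouched.]

    // Sample roughly 1% of new traces; spans whose parent was sampled are
    // still sampled, per ProbabilitySampler's contract.
    trace.ApplyConfig(trace.Config{
        DefaultSampler: trace.ProbabilitySampler(0.01),
    })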
-func ApplyConfig(cfg Config) { - configWriteMu.Lock() - defer configWriteMu.Unlock() - c := *config.Load().(*Config) - if cfg.DefaultSampler != nil { - c.DefaultSampler = cfg.DefaultSampler - } - if cfg.IDGenerator != nil { - c.IDGenerator = cfg.IDGenerator - } - config.Store(&c) -} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go deleted file mode 100644 index 04b1ee4f38..0000000000 --- a/vendor/go.opencensus.io/trace/doc.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace contains support for OpenCensus distributed tracing. - -The following assumes a basic familiarity with OpenCensus concepts. -See http://opencensus.io - - -Exporting Traces - -To export collected tracing data, register at least one exporter. You can use -one of the provided exporters or write your own. - - trace.RegisterExporter(exporter) - -By default, traces will be sampled relatively rarely. To change the sampling -frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler -to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - -Be careful about using trace.AlwaysSample in a production application with -significant traffic: a new trace will be started and exported for every request. - -Adding Spans to a Trace - -A trace consists of a tree of spans. In Go, the current span is carried in a -context.Context. - -It is common to want to capture all the activity of a function call in a span. For -this to work, the function must take a context.Context as a parameter. Add these two -lines to the top of the function: - - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() - -StartSpan will create a new top-level span if the context -doesn't contain another span, otherwise it will create a child span. -*/ -package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/exemplar.go b/vendor/go.opencensus.io/trace/exemplar.go deleted file mode 100644 index 416d80590d..0000000000 --- a/vendor/go.opencensus.io/trace/exemplar.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace - -import ( - "context" - "encoding/hex" - - "go.opencensus.io/exemplar" -) - -func init() { - exemplar.RegisterAttachmentExtractor(attachSpanContext) -} - -func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { - span := FromContext(ctx) - if span == nil { - return a - } - sc := span.SpanContext() - if !sc.IsSampled() { - return a - } - if a == nil { - a = make(exemplar.Attachments) - } - a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) - a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) - return a -} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go deleted file mode 100644 index 77a8c73575..0000000000 --- a/vendor/go.opencensus.io/trace/export.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "sync/atomic" - "time" -) - -// Exporter is a type for functions that receive sampled trace spans. -// -// The ExportSpan method should be safe for concurrent use and should return -// quickly; if an Exporter takes a significant amount of time to process a -// SpanData, that work should be done on another goroutine. -// -// The SpanData should not be modified, but a pointer to it can be kept. -type Exporter interface { - ExportSpan(s *SpanData) -} - -type exportersMap map[Exporter]struct{} - -var ( - exporterMu sync.Mutex - exporters atomic.Value -) - -// RegisterExporter adds to the list of Exporters that will receive sampled -// trace spans. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - exporterMu.Lock() - new := make(exportersMap) - if old, ok := exporters.Load().(exportersMap); ok { - for k, v := range old { - new[k] = v - } - } - new[e] = struct{}{} - exporters.Store(new) - exporterMu.Unlock() -} - -// UnregisterExporter removes from the list of Exporters the Exporter that was -// registered with the given name. -func UnregisterExporter(e Exporter) { - exporterMu.Lock() - new := make(exportersMap) - if old, ok := exporters.Load().(exportersMap); ok { - for k, v := range old { - new[k] = v - } - } - delete(new, e) - exporters.Store(new) - exporterMu.Unlock() -} - -// SpanData contains all the information collected by a Span. -type SpanData struct { - SpanContext - ParentSpanID SpanID - SpanKind int - Name string - StartTime time.Time - // The wall clock time of EndTime will be adjusted to always be offset - // from StartTime by the duration of the span. - EndTime time.Time - // The values of Attributes each have type string, bool, or int64. 
- Attributes map[string]interface{} - Annotations []Annotation - MessageEvents []MessageEvent - Status - Links []Link - HasRemoteParent bool -} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go deleted file mode 100644 index 1c8b9b34b2..0000000000 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides trace internals. -package internal - -type IDGenerator interface { - NewTraceID() [16]byte - NewSpanID() [8]byte -} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go deleted file mode 100644 index 1eb190a96a..0000000000 --- a/vendor/go.opencensus.io/trace/propagation/propagation.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package propagation implements the binary trace context format. -package propagation // import "go.opencensus.io/trace/propagation" - -// TODO: link to external spec document. - -// BinaryFormat format: -// -// Binary value: -// version_id: 1 byte representing the version id. -// -// For version_id = 0: -// -// version_format: -// field_format: -// -// Fields: -// -// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. -// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. -// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. -// -// Fields MUST be encoded using the field id order (smaller to higher). -// -// Valid value example: -// -// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, -// 98, 99, 100, 101, 102, 103, 104, 2, 1} -// -// version_id = 0; -// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} -// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; -// trace_options = {1}; - -import ( - "net/http" - - "go.opencensus.io/trace" -) - -// Binary returns the binary format representation of a SpanContext. -// -// If sc is the zero value, Binary returns nil. 
-func Binary(sc trace.SpanContext) []byte { - if sc == (trace.SpanContext{}) { - return nil - } - var b [29]byte - copy(b[2:18], sc.TraceID[:]) - b[18] = 1 - copy(b[19:27], sc.SpanID[:]) - b[27] = 2 - b[28] = uint8(sc.TraceOptions) - return b[:] -} - -// FromBinary returns the SpanContext represented by b. -// -// If b has an unsupported version ID or contains no TraceID, FromBinary -// returns with ok==false. -func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { - if len(b) == 0 || b[0] != 0 { - return trace.SpanContext{}, false - } - b = b[1:] - if len(b) >= 17 && b[0] == 0 { - copy(sc.TraceID[:], b[1:17]) - b = b[17:] - } else { - return trace.SpanContext{}, false - } - if len(b) >= 9 && b[0] == 1 { - copy(sc.SpanID[:], b[1:9]) - b = b[9:] - } - if len(b) >= 2 && b[0] == 2 { - sc.TraceOptions = trace.TraceOptions(b[1]) - } - return sc, true -} - -// HTTPFormat implementations propagate span contexts -// in HTTP requests. -// -// SpanContextFromRequest extracts a span context from incoming -// requests. -// -// SpanContextToRequest modifies the given request to include the given -// span context. -type HTTPFormat interface { - SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) - SpanContextToRequest(sc trace.SpanContext, req *http.Request) -} - -// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go deleted file mode 100644 index 71c10f9e3b..0000000000 --- a/vendor/go.opencensus.io/trace/sampling.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "encoding/binary" -) - -const defaultSamplingProbability = 1e-4 - -// Sampler decides whether a trace should be sampled and exported. -type Sampler func(SamplingParameters) SamplingDecision - -// SamplingParameters contains the values passed to a Sampler. -type SamplingParameters struct { - ParentContext SpanContext - TraceID TraceID - SpanID SpanID - Name string - HasRemoteParent bool -} - -// SamplingDecision is the value returned by a Sampler. -type SamplingDecision struct { - Sample bool -} - -// ProbabilitySampler returns a Sampler that samples a given fraction of traces. -// -// It also samples spans whose parents are sampled. -func ProbabilitySampler(fraction float64) Sampler { - if !(fraction >= 0) { - fraction = 0 - } else if fraction >= 1 { - return AlwaysSample() - } - - traceIDUpperBound := uint64(fraction * (1 << 63)) - return Sampler(func(p SamplingParameters) SamplingDecision { - if p.ParentContext.IsSampled() { - return SamplingDecision{Sample: true} - } - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 - return SamplingDecision{Sample: x < traceIDUpperBound} - }) -} - -// AlwaysSample returns a Sampler that samples every trace. 
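[editor's note: a brief sketch of the binary propagation helpers removed above; sc would normally come from SpanContext() on a live span, and the log import is assumed.]

    wire := propagation.Binary(sc) // nil if sc is the zero value
    if parsed, ok := propagation.FromBinary(wire); ok {
        log.Printf("trace %s span %s options %d",
            parsed.TraceID, parsed.SpanID, parsed.TraceOptions)
    }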
-// Be careful about using this sampler in a production application with -// significant traffic: a new trace will be started and exported for every -// request. -func AlwaysSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: true} - } -} - -// NeverSample returns a Sampler that samples no traces. -func NeverSample() Sampler { - return func(p SamplingParameters) SamplingDecision { - return SamplingDecision{Sample: false} - } -} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go deleted file mode 100644 index fbabad34c0..0000000000 --- a/vendor/go.opencensus.io/trace/spanbucket.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "time" -) - -// samplePeriod is the minimum time between accepting spans in a single bucket. -const samplePeriod = time.Second - -// defaultLatencies contains the default latency bucket bounds. -// TODO: consider defaults, make configurable -var defaultLatencies = [...]time.Duration{ - 10 * time.Microsecond, - 100 * time.Microsecond, - time.Millisecond, - 10 * time.Millisecond, - 100 * time.Millisecond, - time.Second, - 10 * time.Second, - time.Minute, -} - -// bucket is a container for a set of spans for a particular error code or latency range. -type bucket struct { - nextTime time.Time // next time we can accept a span - buffer []*SpanData // circular buffer of spans - nextIndex int // location next SpanData should be placed in buffer - overflow bool // whether the circular buffer has wrapped around -} - -func makeBucket(bufferSize int) bucket { - return bucket{ - buffer: make([]*SpanData, bufferSize), - } -} - -// add adds a span to the bucket, if nextTime has been reached. -func (b *bucket) add(s *SpanData) { - if s.EndTime.Before(b.nextTime) { - return - } - if len(b.buffer) == 0 { - return - } - b.nextTime = s.EndTime.Add(samplePeriod) - b.buffer[b.nextIndex] = s - b.nextIndex++ - if b.nextIndex == len(b.buffer) { - b.nextIndex = 0 - b.overflow = true - } -} - -// size returns the number of spans in the bucket. -func (b *bucket) size() int { - if b.overflow { - return len(b.buffer) - } - return b.nextIndex -} - -// span returns the ith span in the bucket. -func (b *bucket) span(i int) *SpanData { - if !b.overflow { - return b.buffer[i] - } - if i < len(b.buffer)-b.nextIndex { - return b.buffer[b.nextIndex+i] - } - return b.buffer[b.nextIndex+i-len(b.buffer)] -} - -// resize changes the size of the bucket to n, keeping up to n existing spans. 
-func (b *bucket) resize(n int) { - cur := b.size() - newBuffer := make([]*SpanData, n) - if cur < n { - for i := 0; i < cur; i++ { - newBuffer[i] = b.span(i) - } - b.buffer = newBuffer - b.nextIndex = cur - b.overflow = false - return - } - for i := 0; i < n; i++ { - newBuffer[i] = b.span(i + cur - n) - } - b.buffer = newBuffer - b.nextIndex = 0 - b.overflow = true -} - -// latencyBucket returns the appropriate bucket number for a given latency. -func latencyBucket(latency time.Duration) int { - i := 0 - for i < len(defaultLatencies) && latency >= defaultLatencies[i] { - i++ - } - return i -} - -// latencyBucketBounds returns the lower and upper bounds for a latency bucket -// number. -// -// The lower bound is inclusive, the upper bound is exclusive (except for the -// last bucket.) -func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { - if index == 0 { - return 0, defaultLatencies[index] - } - if index == len(defaultLatencies) { - return defaultLatencies[index-1], 1<<63 - 1 - } - return defaultLatencies[index-1], defaultLatencies[index] -} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go deleted file mode 100644 index c442d99021..0000000000 --- a/vendor/go.opencensus.io/trace/spanstore.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "sync" - "time" - - "go.opencensus.io/internal" -) - -const ( - maxBucketSize = 100000 - defaultBucketSize = 10 -) - -var ( - ssmu sync.RWMutex // protects spanStores - spanStores = make(map[string]*spanStore) -) - -// This exists purely to avoid exposing internal methods used by z-Pages externally. -type internalOnly struct{} - -func init() { - //TODO(#412): remove - internal.Trace = &internalOnly{} -} - -// ReportActiveSpans returns the active spans for the given name. -func (i internalOnly) ReportActiveSpans(name string) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for span := range s.active { - out = append(out, span.makeSpanData()) - } - return out -} - -// ReportSpansByError returns a sample of error spans. -// -// If code is nonzero, only spans with that status code are returned. -func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - if code != 0 { - if b, ok := s.errors[code]; ok { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } else { - for _, b := range s.errors { - for _, sd := range b.buffer { - if sd == nil { - break - } - out = append(out, sd) - } - } - } - return out -} - -// ConfigureBucketSizes sets the number of spans to keep per latency and error -// bucket for different span names. 
-func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { - for _, bc := range bcs { - latencyBucketSize := bc.MaxRequestsSucceeded - if latencyBucketSize < 0 { - latencyBucketSize = 0 - } - if latencyBucketSize > maxBucketSize { - latencyBucketSize = maxBucketSize - } - errorBucketSize := bc.MaxRequestsErrors - if errorBucketSize < 0 { - errorBucketSize = 0 - } - if errorBucketSize > maxBucketSize { - errorBucketSize = maxBucketSize - } - spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) - } -} - -// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. -func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { - out := make(map[string]internal.PerMethodSummary) - ssmu.RLock() - defer ssmu.RUnlock() - for name, s := range spanStores { - s.mu.Lock() - p := internal.PerMethodSummary{ - Active: len(s.active), - } - for code, b := range s.errors { - p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ - ErrorCode: code, - Size: b.size(), - }) - } - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ - MinLatency: min, - MaxLatency: max, - Size: b.size(), - }) - } - s.mu.Unlock() - out[name] = p - } - return out -} - -// ReportSpansByLatency returns a sample of successful spans. -// -// minLatency is the minimum latency of spans to be returned. -// maxLatency, if nonzero, is the maximum latency of spans to be returned. -func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { - s := spanStoreForName(name) - if s == nil { - return nil - } - var out []*SpanData - s.mu.Lock() - defer s.mu.Unlock() - for i, b := range s.latency { - min, max := latencyBucketBounds(i) - if i+1 != len(s.latency) && max <= minLatency { - continue - } - if maxLatency != 0 && maxLatency < min { - continue - } - for _, sd := range b.buffer { - if sd == nil { - break - } - if minLatency != 0 || maxLatency != 0 { - d := sd.EndTime.Sub(sd.StartTime) - if d < minLatency { - continue - } - if maxLatency != 0 && d > maxLatency { - continue - } - } - out = append(out, sd) - } - } - return out -} - -// spanStore keeps track of spans stored for a particular span name. -// -// It contains all active spans; a sample of spans for failed requests, -// categorized by error code; and a sample of spans for successful requests, -// bucketed by latency. -type spanStore struct { - mu sync.Mutex // protects everything below. - active map[*Span]struct{} - errors map[int32]*bucket - latency []bucket - maxSpansPerErrorBucket int -} - -// newSpanStore creates a span store. -func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { - s := &spanStore{ - active: make(map[*Span]struct{}), - latency: make([]bucket, len(defaultLatencies)+1), - maxSpansPerErrorBucket: errorBucketSize, - } - for i := range s.latency { - s.latency[i] = makeBucket(latencyBucketSize) - } - return s -} - -// spanStoreForName returns the spanStore for the given name. -// -// It returns nil if it doesn't exist. -func spanStoreForName(name string) *spanStore { - var s *spanStore - ssmu.RLock() - s, _ = spanStores[name] - ssmu.RUnlock() - return s -} - -// spanStoreForNameCreateIfNew returns the spanStore for the given name. -// -// It creates it if it didn't exist. 
-func spanStoreForNameCreateIfNew(name string) *spanStore { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - return s - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - return s - } - s = newSpanStore(name, defaultBucketSize, defaultBucketSize) - spanStores[name] = s - return s -} - -// spanStoreSetSize resizes the spanStore for the given name. -// -// It creates it if it didn't exist. -func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { - ssmu.RLock() - s, ok := spanStores[name] - ssmu.RUnlock() - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - ssmu.Lock() - defer ssmu.Unlock() - s, ok = spanStores[name] - if ok { - s.resize(latencyBucketSize, errorBucketSize) - return - } - s = newSpanStore(name, latencyBucketSize, errorBucketSize) - spanStores[name] = s -} - -func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { - s.mu.Lock() - for i := range s.latency { - s.latency[i].resize(latencyBucketSize) - } - for _, b := range s.errors { - b.resize(errorBucketSize) - } - s.maxSpansPerErrorBucket = errorBucketSize - s.mu.Unlock() -} - -// add adds a span to the active bucket of the spanStore. -func (s *spanStore) add(span *Span) { - s.mu.Lock() - s.active[span] = struct{}{} - s.mu.Unlock() -} - -// finished removes a span from the active set, and adds a corresponding -// SpanData to a latency or error bucket. -func (s *spanStore) finished(span *Span, sd *SpanData) { - latency := sd.EndTime.Sub(sd.StartTime) - if latency < 0 { - latency = 0 - } - code := sd.Status.Code - - s.mu.Lock() - delete(s.active, span) - if code == 0 { - s.latency[latencyBucket(latency)].add(sd) - } else { - if s.errors == nil { - s.errors = make(map[int32]*bucket) - } - if b := s.errors[code]; b != nil { - b.add(sd) - } else { - b := makeBucket(s.maxSpansPerErrorBucket) - s.errors[code] = &b - b.add(sd) - } - } - s.mu.Unlock() -} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go deleted file mode 100644 index ec60effd10..0000000000 --- a/vendor/go.opencensus.io/trace/status_codes.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -// Status codes for use with Span.SetStatus. 
These correspond to the status -// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -const ( - StatusCodeOK = 0 - StatusCodeCancelled = 1 - StatusCodeUnknown = 2 - StatusCodeInvalidArgument = 3 - StatusCodeDeadlineExceeded = 4 - StatusCodeNotFound = 5 - StatusCodeAlreadyExists = 6 - StatusCodePermissionDenied = 7 - StatusCodeResourceExhausted = 8 - StatusCodeFailedPrecondition = 9 - StatusCodeAborted = 10 - StatusCodeOutOfRange = 11 - StatusCodeUnimplemented = 12 - StatusCodeInternal = 13 - StatusCodeUnavailable = 14 - StatusCodeDataLoss = 15 - StatusCodeUnauthenticated = 16 -) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go deleted file mode 100644 index 9e5e5f0331..0000000000 --- a/vendor/go.opencensus.io/trace/trace.go +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "go.opencensus.io/internal" - "go.opencensus.io/trace/tracestate" -) - -// Span represents a span of a trace. It has an associated SpanContext, and -// stores data accumulated while the span is active. -// -// Ideally users should interact with Spans by calling the functions in this -// package that take a Context parameter. -type Span struct { - // data contains information recorded about the span. - // - // It will be non-nil if we are exporting the span or recording events for it. - // Otherwise, data is nil, and the Span is simply a carrier for the - // SpanContext, so that the trace ID is propagated. - data *SpanData - mu sync.Mutex // protects the contents of *data (but not the pointer value.) - spanContext SpanContext - // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. - *spanStore - endOnce sync.Once - - executionTracerTaskEnd func() // ends the execution tracer span -} - -// IsRecordingEvents returns true if events are being recorded for this span. -// Use this check to avoid computing expensive annotations when they will never -// be used. -func (s *Span) IsRecordingEvents() bool { - if s == nil { - return false - } - return s.data != nil -} - -// TraceOptions contains options associated with a trace span. -type TraceOptions uint32 - -// IsSampled returns true if the span will be exported. -func (sc SpanContext) IsSampled() bool { - return sc.TraceOptions.IsSampled() -} - -// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. -func (sc *SpanContext) setIsSampled(sampled bool) { - if sampled { - sc.TraceOptions |= 1 - } else { - sc.TraceOptions &= ^TraceOptions(1) - } -} - -// IsSampled returns true if the span will be exported. -func (t TraceOptions) IsSampled() bool { - return t&1 == 1 -} - -// SpanContext contains the state that must propagate across process boundaries. 
-// -// SpanContext is not an implementation of context.Context. -// TODO: add reference to external Census docs for SpanContext. -type SpanContext struct { - TraceID TraceID - SpanID SpanID - TraceOptions TraceOptions - Tracestate *tracestate.Tracestate -} - -type contextKey struct{} - -// FromContext returns the Span stored in a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Span { - s, _ := ctx.Value(contextKey{}).(*Span) - return s -} - -// NewContext returns a new context with the given Span attached. -func NewContext(parent context.Context, s *Span) context.Context { - return context.WithValue(parent, contextKey{}, s) -} - -// All available span kinds. Span kind must be either one of these values. -const ( - SpanKindUnspecified = iota - SpanKindServer - SpanKindClient -) - -// StartOptions contains options concerning how a span is started. -type StartOptions struct { - // Sampler to consult for this Span. If provided, it is always consulted. - // - // If not provided, then the behavior differs based on whether - // the parent of this Span is remote, local, or there is no parent. - // In the case of a remote parent or no parent, the - // default sampler (see Config) will be consulted. Otherwise, - // when there is a non-remote parent, no new sampling decision will be made: - // we will preserve the sampling of the parent. - Sampler Sampler - - // SpanKind represents the kind of a span. If none is set, - // SpanKindUnspecified is used. - SpanKind int -} - -// StartOption apply changes to StartOptions. -type StartOption func(*StartOptions) - -// WithSpanKind makes new spans to be created with the given kind. -func WithSpanKind(spanKind int) StartOption { - return func(o *StartOptions) { - o.SpanKind = spanKind - } -} - -// WithSampler makes new spans to be be created with a custom sampler. -// Otherwise, the global sampler is used. -func WithSampler(sampler Sampler) StartOption { - return func(o *StartOptions) { - o.Sampler = sampler - } -} - -// StartSpan starts a new child span of the current span in the context. If -// there is no span in the context, creates a new trace and span. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. -func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - var parent SpanContext - if p := FromContext(ctx); p != nil { - parent = p.spanContext - } - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) - - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - return NewContext(ctx, span), span -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -// -// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is -// preferred for cases where the parent is propagated via an incoming request. -// -// Returned context contains the newly created span. You can use it to -// propagate the returned span in process. 
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { - var opts StartOptions - for _, op := range o { - op(&opts) - } - span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) - ctx, end := startExecutionTracerTask(ctx, name) - span.executionTracerTaskEnd = end - return NewContext(ctx, span), span -} - -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { - span := &Span{} - span.spanContext = parent - - cfg := config.Load().(*Config) - - if !hasParent { - span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() - } - span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() - sampler := cfg.DefaultSampler - - if !hasParent || remoteParent || o.Sampler != nil { - // If this span is the child of a local span and no Sampler is set in the - // options, keep the parent's TraceOptions. - // - // Otherwise, consult the Sampler in the options if it is non-nil, otherwise - // the default sampler. - if o.Sampler != nil { - sampler = o.Sampler - } - span.spanContext.setIsSampled(sampler(SamplingParameters{ - ParentContext: parent, - TraceID: span.spanContext.TraceID, - SpanID: span.spanContext.SpanID, - Name: name, - HasRemoteParent: remoteParent}).Sample) - } - - if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { - return span - } - - span.data = &SpanData{ - SpanContext: span.spanContext, - StartTime: time.Now(), - SpanKind: o.SpanKind, - Name: name, - HasRemoteParent: remoteParent, - } - if hasParent { - span.data.ParentSpanID = parent.SpanID - } - if internal.LocalSpanStoreEnabled { - var ss *spanStore - ss = spanStoreForNameCreateIfNew(name) - if ss != nil { - span.spanStore = ss - ss.add(span) - } - } - - return span -} - -// End ends the span. -func (s *Span) End() { - if s == nil { - return - } - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - if !s.IsRecordingEvents() { - return - } - s.endOnce.Do(func() { - exp, _ := exporters.Load().(exportersMap) - mustExport := s.spanContext.IsSampled() && len(exp) > 0 - if s.spanStore != nil || mustExport { - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if mustExport { - for e := range exp { - e.ExportSpan(sd) - } - } - } - }) -} - -// makeSpanData produces a SpanData representing the current state of the Span. -// It requires that s.data is non-nil. -func (s *Span) makeSpanData() *SpanData { - var sd SpanData - s.mu.Lock() - sd = *s.data - if s.data.Attributes != nil { - sd.Attributes = make(map[string]interface{}) - for k, v := range s.data.Attributes { - sd.Attributes[k] = v - } - } - s.mu.Unlock() - return &sd -} - -// SpanContext returns the SpanContext of the span. -func (s *Span) SpanContext() SpanContext { - if s == nil { - return SpanContext{} - } - return s.spanContext -} - -// SetName sets the name of the span, if it is recording events. -func (s *Span) SetName(name string) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Name = name - s.mu.Unlock() -} - -// SetStatus sets the status of the span, if it is recording events. -func (s *Span) SetStatus(status Status) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Status = status - s.mu.Unlock() -} - -// AddAttributes sets attributes in the span. -// -// Existing attributes whose keys appear in the attributes parameter are overwritten. 
-func (s *Span) AddAttributes(attributes ...Attribute) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - if s.data.Attributes == nil { - s.data.Attributes = make(map[string]interface{}) - } - copyAttributes(s.data.Attributes, attributes) - s.mu.Unlock() -} - -// copyAttributes copies a slice of Attributes into a map. -func copyAttributes(m map[string]interface{}, attributes []Attribute) { - for _, a := range attributes { - m[a.key] = a.value - } -} - -func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { - now := time.Now() - msg := fmt.Sprintf(format, a...) - var m map[string]interface{} - s.mu.Lock() - if len(attributes) != 0 { - m = make(map[string]interface{}) - copyAttributes(m, attributes) - } - s.data.Annotations = append(s.data.Annotations, Annotation{ - Time: now, - Message: msg, - Attributes: m, - }) - s.mu.Unlock() -} - -func (s *Span) printStringInternal(attributes []Attribute, str string) { - now := time.Now() - var a map[string]interface{} - s.mu.Lock() - if len(attributes) != 0 { - a = make(map[string]interface{}) - copyAttributes(a, attributes) - } - s.data.Annotations = append(s.data.Annotations, Annotation{ - Time: now, - Message: str, - Attributes: a, - }) - s.mu.Unlock() -} - -// Annotate adds an annotation with attributes. -// Attributes can be nil. -func (s *Span) Annotate(attributes []Attribute, str string) { - if !s.IsRecordingEvents() { - return - } - s.printStringInternal(attributes, str) -} - -// Annotatef adds an annotation with attributes. -func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { - if !s.IsRecordingEvents() { - return - } - s.lazyPrintfInternal(attributes, format, a...) -} - -// AddMessageSendEvent adds a message send event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ - Time: now, - EventType: MessageEventTypeSent, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddMessageReceiveEvent adds a message receive event to the span. -// -// messageID is an identifier for the message, which is recommended to be -// unique in this span and the same between the send event and the receive -// event (this allows to identify a message between the sender and receiver). -// For example, this could be a sequence id. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { - if !s.IsRecordingEvents() { - return - } - now := time.Now() - s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ - Time: now, - EventType: MessageEventTypeRecv, - MessageID: messageID, - UncompressedByteSize: uncompressedByteSize, - CompressedByteSize: compressedByteSize, - }) - s.mu.Unlock() -} - -// AddLink adds a link to the span. 
-func (s *Span) AddLink(l Link) { - if !s.IsRecordingEvents() { - return - } - s.mu.Lock() - s.data.Links = append(s.data.Links, l) - s.mu.Unlock() -} - -func (s *Span) String() string { - if s == nil { - return "" - } - if s.data == nil { - return fmt.Sprintf("span %s", s.spanContext.SpanID) - } - s.mu.Lock() - str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) - s.mu.Unlock() - return str -} - -var config atomic.Value // access atomically - -func init() { - gen := &defaultIDGenerator{} - // initialize traceID and spanID generators. - var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - - config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, - }) -} - -type defaultIDGenerator struct { - sync.Mutex - - // Please keep these as the first fields - // so that these 8 byte fields will be aligned on addresses - // divisible by 8, on both 32-bit and 64-bit machines when - // performing atomic increments and accesses. - // See: - // * https://github.com/census-instrumentation/opencensus-go/issues/587 - // * https://github.com/census-instrumentation/opencensus-go/issues/865 - // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG - nextSpanID uint64 - spanIDInc uint64 - - traceIDAdd [2]uint64 - traceIDRand *rand.Rand -} - -// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *defaultIDGenerator) NewSpanID() [8]byte { - var id uint64 - for id == 0 { - id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) - } - var sid [8]byte - binary.LittleEndian.PutUint64(sid[:], id) - return sid -} - -// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. -// mu should be held while this function is called. -func (gen *defaultIDGenerator) NewTraceID() [16]byte { - var tid [16]byte - // Construct the trace ID from two outputs of traceIDRand, with a constant - // added to each half for additional entropy. - gen.Lock() - binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) - binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) - gen.Unlock() - return tid -} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go deleted file mode 100644 index b7d8aaf284..0000000000 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.11 - -package trace - -import ( - "context" - t "runtime/trace" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - if !t.IsEnabled() { - // Avoid additional overhead if - // runtime/trace is not enabled. 
- return ctx, func() {} - } - nctx, task := t.NewTask(ctx, name) - return nctx, task.End -} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go deleted file mode 100644 index e25419859c..0000000000 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.11 - -package trace - -import ( - "context" -) - -func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { - return ctx, func() {} -} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go deleted file mode 100644 index 2d6c713eb3..0000000000 --- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tracestate implements support for the Tracestate header of the -// W3C TraceContext propagation format. -package tracestate - -import ( - "fmt" - "regexp" -) - -const ( - keyMaxSize = 256 - valueMaxSize = 256 - maxKeyValuePairs = 32 -) - -const ( - keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` -) - -var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) -var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) - -// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different -// vendors propagate additional information and inter-operate with their legacy Id formats. -type Tracestate struct { - entries []Entry -} - -// Entry represents one key-value pair in a list of key-value pair of Tracestate. -type Entry struct { - // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, - // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and - // forward slashes /. - Key string - - // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the - // range 0x20 to 0x7E) except comma , and =. - Value string -} - -// Entries returns a slice of Entry. 
-func (ts *Tracestate) Entries() []Entry { - if ts == nil { - return nil - } - return ts.entries -} - -func (ts *Tracestate) remove(key string) *Entry { - for index, entry := range ts.entries { - if entry.Key == key { - ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) - return &entry - } - } - return nil -} - -func (ts *Tracestate) add(entries []Entry) error { - for _, entry := range entries { - ts.remove(entry.Key) - } - if len(ts.entries)+len(entries) > maxKeyValuePairs { - return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", - len(entries), len(ts.entries), maxKeyValuePairs) - } - ts.entries = append(entries, ts.entries...) - return nil -} - -func isValid(entry Entry) bool { - return keyValidationRegExp.MatchString(entry.Key) && - valueValidationRegExp.MatchString(entry.Value) -} - -func containsDuplicateKey(entries ...Entry) (string, bool) { - keyMap := make(map[string]int) - for _, entry := range entries { - if _, ok := keyMap[entry.Key]; ok { - return entry.Key, true - } - keyMap[entry.Key] = 1 - } - return "", false -} - -func areEntriesValid(entries ...Entry) (*Entry, bool) { - for _, entry := range entries { - if !isValid(entry) { - return &entry, false - } - } - return nil, true -} - -// New creates a Tracestate object from a parent and/or entries (key-value pair). -// Entries from the parent are copied if present. The entries passed to this function -// are inserted in front of those copied from the parent. If an entry copied from the -// parent contains the same key as one of the entry in entries then the entry copied -// from the parent is removed. See add func. -// -// An error is returned with nil Tracestate if -// 1. one or more entry in entries is invalid. -// 2. two or more entries in the input entries have the same key. -// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. -// (duplicate entry is counted only once). -func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { - if parent == nil && len(entries) == 0 { - return nil, nil - } - if entry, ok := areEntriesValid(entries...); !ok { - return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) - } - - if key, duplicate := containsDuplicateKey(entries...); duplicate { - return nil, fmt.Errorf("contains duplicate keys (%s)", key) - } - - tracestate := Tracestate{} - - if parent != nil && len(parent.entries) > 0 { - tracestate.entries = append([]Entry{}, parent.entries...) - } - - err := tracestate.add(entries) - if err != nil { - return nil, err - } - return &tracestate, nil -} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 5d052781bc..0000000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go -//go:generate go run gen.go -test - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". 
-func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func genFile(name string, buf *bytes.Buffer) { - b, err := format.Source(buf.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile(name, b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) - sort.Strings(all) - - // uniq - lists have dups - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - all[w] = s - w++ - } - } - all = all[:w] - - if *test { - var buf bytes.Buffer - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") - fmt.Fprintln(&buf, "package atom\n") - fmt.Fprintln(&buf, "var testAtomList = []string{") - for _, s := range all { - fmt.Fprintf(&buf, "\t%q,\n", s) - } - fmt.Fprintln(&buf, "}") - - genFile("table_test.go", &buf) - return - } - - // Find hash that minimizes table size. - var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) - - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - var buf bytes.Buffer - // Generate the Go code. 
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go\n") - fmt.Fprintln(&buf, "package atom\n\nconst (") - - // compute max len - maxLen := 0 - for _, s := range all { - if maxLen < len(s) { - maxLen = len(s) - } - fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Fprintln(&buf, ")\n") - - fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) - fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - - fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Fprintf(&buf, "}\n") - datasize := (1 << best.k) * 4 - - fmt.Fprintln(&buf, "const atomText =") - textsize := len(text) - for len(text) > 60 { - fmt.Fprintf(&buf, "\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Fprintf(&buf, "\t%q\n\n", text) - - genFile("table.go", &buf) - - fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. -type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. -func (t *table) hash(s string) (h1, h2 uint32) { - h := fnv(t.h0, s) - h1 = h & t.mask - h2 = (h >> 16) & t.mask - return -} - -// init initializes the table with the given parameters. -// h0 is the initial hash value, -// k is the number of bits of hash value to use, and -// x is the list of strings to store in the table. -// init returns false if the table cannot be constructed. -func (t *table) init(h0 uint32, k uint, x []string) bool { - t.h0 = h0 - t.k = k - t.tab = make([]string, 1< len(t.tab) { - return false - } - s := t.tab[i] - h1, h2 := t.hash(s) - j := h1 + h2 - i - if t.tab[j] != "" && !t.push(j, depth+1) { - return false - } - t.tab[j] = s - return true -} - -// The lists of element names and attribute keys were taken from -// https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 16 April 2018" version. - -// "command", "keygen" and "menuitem" have been removed from the spec, -// but are kept here for backwards compatibility. 
-var elements = []string{ - "a", - "abbr", - "address", - "area", - "article", - "aside", - "audio", - "b", - "base", - "bdi", - "bdo", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "cite", - "code", - "col", - "colgroup", - "command", - "data", - "datalist", - "dd", - "del", - "details", - "dfn", - "dialog", - "div", - "dl", - "dt", - "em", - "embed", - "fieldset", - "figcaption", - "figure", - "footer", - "form", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "hgroup", - "hr", - "html", - "i", - "iframe", - "img", - "input", - "ins", - "kbd", - "keygen", - "label", - "legend", - "li", - "link", - "main", - "map", - "mark", - "menu", - "menuitem", - "meta", - "meter", - "nav", - "noscript", - "object", - "ol", - "optgroup", - "option", - "output", - "p", - "param", - "picture", - "pre", - "progress", - "q", - "rp", - "rt", - "ruby", - "s", - "samp", - "script", - "section", - "select", - "slot", - "small", - "source", - "span", - "strong", - "style", - "sub", - "summary", - "sup", - "table", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "tr", - "track", - "u", - "ul", - "var", - "video", - "wbr", -} - -// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 -// -// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", -// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, -// but are kept here for backwards compatibility. -var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "allowfullscreen", - "allowpaymentrequest", - "allowusermedia", - "alt", - "as", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "color", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "integrity", - "is", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "nomodule", - "nonce", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "playsinline", - "poster", - "preload", - "radiogroup", - "readonly", - "referrerpolicy", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "slot", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "srcset", - "start", - "step", - "style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "updateviacache", - "usemap", - "value", - "width", - "workertype", - "wrap", -} - -// "onautocomplete", "onautocompleteerror", "onmousewheel", -// "onshow" and "onsort" have been removed from the spec, -// but are kept here for backwards compatibility. 
-var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onauxclick", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncopy", - "oncuechange", - "oncut", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragexit", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadend", - "onloadstart", - "onmessage", - "onmessageerror", - "onmousedown", - "onmouseenter", - "onmouseleave", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onwheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpaste", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onrejectionhandled", - "onscroll", - "onsecuritypolicyviolation", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunhandledrejection", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. -var extra = []string{ - "acronym", - "align", - "annotation", - "annotation-xml", - "applet", - "basefont", - "bgsound", - "big", - "blink", - "center", - "color", - "desc", - "face", - "font", - "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. - "foreignobject", - "frame", - "frameset", - "image", - "isindex", - "listing", - "malignmark", - "marquee", - "math", - "mglyph", - "mi", - "mn", - "mo", - "ms", - "mtext", - "nobr", - "noembed", - "noframes", - "plaintext", - "prompt", - "public", - "rb", - "rtc", - "spacer", - "strike", - "svg", - "system", - "tt", - "xmp", -} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 index 685f0e7ea2..0000000000 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package timeseries implements a time series structure for stats collection. -package timeseries // import "golang.org/x/net/internal/timeseries" - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. 
- CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. -type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. New observations are added -// to the last bucket. -type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. 
-func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. - ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. -func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. - if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. 
-func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. -func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. -func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? 
- if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) - } - - // The i'th value is computed as show below. - // interval = (finish/start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src. - overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. -func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. 
-func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index c646a6952e..0000000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Events handler. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. 
- f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl().Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. -func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. 
-func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. 
- el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. -func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -var eventsTmplCache *template.Template -var eventsTmplOnce sync.Once - -func eventsTmpl() *template.Template { - eventsTmplOnce.Do(func() { - eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, - }).Parse(eventsHTML)) - }) - return eventsTmplCache -} - -const eventsHTML = ` - - - events - - - - -

/debug/events

- - - {{range $i, $fam := .Families}} - - - - {{range $j, $bucket := $.Buckets}} - {{$n := index $.Counts $i $j}} - - {{end}} - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} {{$bucket.String}}] - {{if $n}}{{end}} -
- -{{if $.EventLogs}} -
-

Family: {{$.Family}}

- -{{if $.Expanded}}{{end}} -[Summary]{{if $.Expanded}}{{end}} - -{{if not $.Expanded}}{{end}} -[Expanded]{{if not $.Expanded}}{{end}} - - - - {{range $el := $.EventLogs}} - - - - - {{if $.Expanded}} - - - - - - {{range $el.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} -
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index 9bf4286c79..0000000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - "sync" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl().Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -var distTmplCache *template.Template -var distTmplOnce sync.Once - -func distTmpl() *template.Template { - distTmplOnce.Do(func() { - // Input: data - distTmplCache = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`)) - }) - return distTmplCache -} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index 3ebf6f2daa..0000000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1130 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. - -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace // import "golang.org/x/net/trace" - -import ( - "bytes" - "context" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "net/url" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// HTTP ServeMux paths. -const ( - debugRequestsPath = "/debug/requests" - debugEventsPath = "/debug/events" -) - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customize its authorization requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. 
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) - if pat == debugRequestsPath { - panic("/debug/requests is already registered. You may have two independent copies of " + - "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + - "involve a vendored copy of golang.org/x/net/trace.") - } - - // TODO(jbd): Serve Traces from /debug/traces in the future? - // There is no requirement for a request to be present to have traces. - http.HandleFunc(debugRequestsPath, Traces) - http.HandleFunc(debugEventsPath, Events) -} - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Traces responds with traces from the program. -// The package initialization registers it in http.DefaultServeMux -// at /debug/requests. -// -// It performs authorization by running AuthRequest. -func Traces(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) -} - -// Events responds with a page of events collected by EventLogs. -// The package initialization registers it in http.DefaultServeMux -// at /debug/events. -// -// It performs authorization by running AuthRequest. -func Events(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking. The request may be nil. -// -// Most users will use the Traces handler. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. 
- if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. - data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. 
- SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.maxEvents = maxEventsPerTrace - tr.events = tr.eventsBuf[:0] - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - elapsed := time.Now().Sub(tr.Start) - tr.mu.Lock() - tr.Elapsed = elapsed - tr.mu.Unlock() - - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - tr.mu.RLock() // protects tr fields in Cond.match calls - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - tr.mu.RUnlock() - - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. 
-} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. -func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. 
- b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. -func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - Sensitive bool // whether this event contains sensitive information - What interface{} // string or fmt.Stringer -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Start time of the this trace. - Start time.Time - - mu sync.RWMutex - events []event // Append-only sequence of events (modulo discards). - maxEvents int - recycler func(interface{}) - IsError bool // Whether this trace resulted in an error. - Elapsed time.Duration // Elapsed time for this trace, zero while active. - traceID uint64 // Trace information if non-zero. 
- spanID uint64 - - refs int32 // how many buckets this is in - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set - - eventsBuf [4]event // preallocated buffer in case we only log a few events -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - - tr.mu.Lock() - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.maxEvents = 0 - tr.events = nil - tr.recycler = nil - tr.mu.Unlock() - - tr.refs = 0 - tr.disc = 0 - tr.finishStack = nil - for i := range tr.eventsBuf { - tr.eventsBuf[i] = event{} - } -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. -// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a trace.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < tr.maxEvents { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((tr.maxEvents - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[tr.maxEvents-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { - tr.mu.Lock() - tr.IsError = true - tr.mu.Unlock() -} - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.mu.Lock() - tr.recycler = f - tr.mu.Unlock() -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.mu.Lock() - tr.traceID, tr.spanID = traceID, spanID - tr.mu.Unlock() -} - -func (tr *trace) SetMaxEvents(m int) { - tr.mu.Lock() - // Always keep at least three events: first, discarded count, last. 
- if len(tr.events) == 0 && m > 3 { - tr.maxEvents = m - } - tr.mu.Unlock() -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - tr.mu.RLock() - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - tr.mu.RUnlock() - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - tr.mu.RLock() - t := tr.Elapsed - tr.mu.RUnlock() - - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? - -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmplCache *template.Template -var pageTmplOnce sync.Once - -func pageTmpl() *template.Template { - pageTmplOnce.Do(func() { - pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, - }).Parse(pageHTML)) - }) - return pageTmplCache -} - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

/debug/requests

-{{end}} {{/* end of Prolog */}} - -{{define "StatusTable"}} - - {{range $fam := .Families}} - - - - {{$n := index $.ActiveTraceCount $fam}} - - - {{$f := index $.CompletedTraces $fam}} - {{range $i, $b := $f.Buckets}} - {{$empty := $b.Empty}} - - {{end}} - - {{$nb := len $f.Buckets}} - - - - - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} active] - {{if $n}}{{end}} - - {{if not $empty}}{{end}} - [{{.Cond}}] - {{if not $empty}}{{end}} - - [minute] - - [hour] - - [total] -
-{{end}} {{/* end of StatusTable */}} - -{{define "Epilog"}} -{{if $.Traces}} -
-

Family: {{$.Family}}

- -{{if or $.Expanded $.Traced}} - [Normal/Summary] -{{else}} - [Normal/Summary] -{{end}} - -{{if or (not $.Expanded) $.Traced}} - [Normal/Expanded] -{{else}} - [Normal/Expanded] -{{end}} - -{{if not $.Active}} - {{if or $.Expanded (not $.Traced)}} - [Traced/Summary] - {{else}} - [Traced/Summary] - {{end}} - {{if or (not $.Expanded) (not $.Traced)}} - [Traced/Expanded] - {{else}} - [Traced/Expanded] - {{end}} -{{end}} - -{{if $.Total}} -

Showing {{len $.Traces}} of {{$.Total}} traces.

-{{end}} - - - - - {{range $tr := $.Traces}} - - - - - {{/* TODO: include traceID/spanID */}} - - {{if $.Expanded}} - {{range $tr.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
- {{if $.Active}}Active{{else}}Completed{{end}} Requests -
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
-{{end}} {{/* if $.Traces */}} - -{{if $.Histogram}} -

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 68f436ed95..0f443e6934 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -19,57 +19,6 @@ See godoc for further documentation and examples. * [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) * [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - -## App Engine - -In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package. Later replaced by the standard `context` package -of the [`context.Context`](https://golang.org/pkg/context#Context) type. - - -This means it's no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. - -```go -import ( - "context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" -) - -func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") -} -``` - ## Policy for new packages We no longer accept new provider-specific packages in this repo. For diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index feb1157b15..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "time" - - "golang.org/x/oauth2" -) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. -var appengineAppIDFunc func(c context.Context) string - -// AppEngineTokenSource returns a token source that fetches tokens from either -// the current application's service account or from the metadata server, -// depending on the App Engine environment. 
See below for environment-specific -// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that -// involves user accounts, see oauth2.Config instead. -// -// First generation App Engine runtimes (<= Go 1.9): -// AppEngineTokenSource returns a token source that fetches tokens issued to the -// current App Engine application's service account. The provided context must have -// come from appengine.NewContext. -// -// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: -// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the -// flexible environment. It delegates to ComputeTokenSource, and the provided -// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, -// which DefaultTokenSource will use in this case) instead. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - return appEngineTokenSource(ctx, scope...) -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go deleted file mode 100644 index 83dacac320..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// This file applies to App Engine first generation runtimes (<= Go 1.9). - -package google - -import ( - "context" - "sort" - "strings" - "sync" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -func init() { - appengineTokenFunc = appengine.AccessToken - appengineAppIDFunc = appengine.AppID -} - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &gaeTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type gaeTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go deleted file mode 100644 index 04c2c2216a..0000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. 
- -package google - -import ( - "context" - "log" - "sync" - - "golang.org/x/oauth2" -) - -var logOnce sync.Once // only spam about deprecation once - -// See comment on AppEngineTokenSource in appengine.go. -func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - logOnce.Do(func() { - log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") - }) - return ComputeTokenSource("") -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index 5087d845fe..0000000000 --- a/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" -) - -// Credentials holds Google credentials, including "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -type Credentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte -} - -// DefaultCredentials is the old name of Credentials. -// -// Deprecated: use Credentials instead. -type DefaultCredentials = Credentials - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource returns the token source for -// "Application Default Credentials". -// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - creds, err := FindDefaultCredentials(ctx, scope...) - if err != nil { - return nil, err - } - return creds.TokenSource, nil -} - -// FindDefaultCredentials searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses -// the appengine.AccessToken function. -// 4. On Google Compute Engine, Google App Engine standard second generation runtimes -// (>= Go 1.11), and Google App Engine flexible environment, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) 
-func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scopes) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return creds, nil - } - - // Second, try a well-known file. - filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) - // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) - // and App Engine flexible use ComputeTokenSource and the metadata server. - if appengineTokenFunc != nil { - return &DefaultCredentials{ - ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scopes...), - }, nil - } - - // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, - // or App Engine flexible, use the metadata server. - if metadata.OnGCE() { - id, _ := metadata.ProjectID() - return &DefaultCredentials{ - ProjectID: id, - TokenSource: ComputeTokenSource(""), - }, nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) -} - -// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can -// represent either a Google Developers Console client_credentials.json file (as in -// ConfigFromJSON) or a Google Developers service account key file (as in -// JWTConfigFromJSON). -func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { - var f credentialsFile - if err := json.Unmarshal(jsonData, &f); err != nil { - return nil, err - } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) - if err != nil { - return nil, err - } - return &DefaultCredentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, - }, nil -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return CredentialsFromJSON(ctx, b, scopes...) -} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go deleted file mode 100644 index 73be629033..0000000000 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and authenticated -// HTTP requests to Google APIs. 
It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, and Google -// App Engine service accounts. -// -// A brief overview of the package follows. For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -// -// OAuth2 Configs -// -// Two functions in this package return golang.org/x/oauth2.Config values from Google credential -// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, -// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or -// create an http.Client. -// -// -// Credentials -// -// The Credentials type represents Google credentials, including Application Default -// Credentials. -// -// Use FindDefaultCredentials to obtain Application Default Credentials. -// FindDefaultCredentials looks in some well-known places for a credentials file, and -// will call AppEngineTokenSource or ComputeTokenSource as needed. -// -// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, -// then use the credentials to construct an http.Client or an oauth2.TokenSource. -// -// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats -// described in OAuth2 Configs, above. The TokenSource in the returned value is the -// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or -// JWTConfigFromJSON, but the Credentials may contain additional information -// that is useful is some circumstances. -package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index ca7d208d77..0000000000 --- a/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" -) - -// Endpoint is Google's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloaded from -// https://console.developers.google.com, under "Credentials". Download the Web -// application credentials in the JSON format and provide the contents of the -// file as jsonKey. 
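As a rough illustration of the three-legged flow that ConfigFromJSON feeds, assuming a downloaded client_credentials.json and an example scope; the state value is a placeholder.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	data, err := ioutil.ReadFile("client_credentials.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.ConfigFromJSON(data, "https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		log.Fatal(err)
	}
	// Direct the user to this URL; the authorization code it yields is later
	// exchanged for a token with conf.Exchange(ctx, code).
	fmt.Println(conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
}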
-func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" for your project at -// https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var f credentialsFile - if err := json.Unmarshal(jsonKey, &f); err != nil { - return nil, err - } - if f.Type != serviceAccountKey { - return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) - } - scope = append([]string(nil), scope...) // copy - return f.jwtConfig(scope), nil -} - -// JSON key file types. -const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" -) - -// credentialsFile is the unmarshalled representation of a credentials file. -type credentialsFile struct { - Type string `json:"type"` // serviceAccountKey or userCredentialsKey - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` - - // User Credential fields - // (These typically come from gcloud auth.) - ClientSecret string `json:"client_secret"` - ClientID string `json:"client_id"` - RefreshToken string `json:"refresh_token"` -} - -func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { - cfg := &jwt.Config{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - Scopes: scopes, - TokenURL: f.TokenURL, - } - if cfg.TokenURL == "" { - cfg.TokenURL = JWTTokenURL - } - return cfg -} - -func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { - switch f.Type { - case serviceAccountKey: - cfg := f.jwtConfig(scopes) - return cfg.TokenSource(ctx), nil - case userCredentialsKey: - cfg := &oauth2.Config{ - ClientID: f.ClientID, - ClientSecret: f.ClientSecret, - Scopes: scopes, - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: f.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", f.Type) - } -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. -func ComputeTokenSource(account string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account}) -} - -type computeSource struct { - account string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index b0fdb3a888..0000000000 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "crypto/rsa" - "fmt" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
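A hedged sketch of the two JWT options described above, assuming a service-account key file named service-account.json; the scope and audience strings are examples only.

package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	data, err := ioutil.ReadFile("service-account.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// Standard two-legged flow: sign a JWT, exchange it for an access token.
	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background())
	_ = client // authorized *http.Client

	// JWT-as-access-token variant for the few services that accept it directly.
	ts, err := google.JWTAccessTokenSourceFromJSON(data, "https://storage.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	_ = ts
}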
-func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - pk: pk, - pkID: cfg.PrivateKeyID, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type jwtAccessTokenSource struct { - email, audience string - pk *rsa.PrivateKey - pkID string -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - KeyID: string(ts.pkID), - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index 456224bc78..0000000000 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/oauth2" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
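A small illustrative use of SDKConfig for reusing gcloud credentials; the empty account string picks whatever account is currently active in the Google Cloud SDK.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	conf, err := google.NewSDKConfig("") // "" selects the active gcloud account
	if err != nil {
		log.Fatal(err)
	}
	client := conf.Client(context.Background())
	_ = client // authorized *http.Client backed by gcloud's stored token
}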
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := parseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieve tokens from -// Google Cloud SDK credentials using the provided context. -// It will returns the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. 
-func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -func parseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": {}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. -var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - if v := os.Getenv("HOME"); v != "" { - return v - } - // Else, fall back to user.Current: - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "" -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index a831b77465..83f7847e49 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -11,11 +11,13 @@ import ( "fmt" "io" "io/ioutil" + "math" "mime" "net/http" "net/url" "strconv" "strings" + "sync" "time" "golang.org/x/net/context/ctxhttp" @@ -77,6 +79,9 @@ func (e *tokenJSON) expiry() (t time.Time) { type expirationTime int32 func (e *expirationTime) UnmarshalJSON(b []byte) error { + if len(b) == 0 || string(b) == "null" { + return nil + } var n json.Number err := json.Unmarshal(b, &n) if err != nil { @@ -86,106 +91,78 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { if err != nil { return err } + if i > math.MaxInt32 { + i = math.MaxInt32 + } *e = expirationTime(i) return nil } -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://api.codeswholesale.com/oauth/token", - "https://api.dropbox.com/", - "https://api.dropboxapi.com/", - "https://api.instagram.com/", - "https://api.netatmo.net/", - "https://api.odnoklassniki.ru/", - "https://api.pushbullet.com/", - "https://api.soundcloud.com/", - "https://api.twitch.tv/", - "https://id.twitch.tv/", - "https://app.box.com/", - "https://api.box.com/", - "https://connect.stripe.com/", - "https://login.mailchimp.com/", - "https://login.microsoftonline.com/", - "https://login.salesforce.com/", - "https://login.windows.net", - "https://login.live.com/", - "https://login.live-int.com/", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://oauth.vk.com/", - "https://openapi.baidu.com/", - "https://slack.com/", - "https://test-sandbox.auth.corp.google.com", - "https://test.salesforce.com/", - "https://user.gini.net/", - "https://www.douban.com/", - "https://www.googleapis.com/", - 
"https://www.linkedin.com/", - "https://www.strava.com/oauth/", - "https://www.wunderlist.com/oauth/", - "https://api.patreon.com/", - "https://sandbox.codeswholesale.com/oauth/token", - "https://api.sipgate.com/v1/authorization/oauth", - "https://api.medium.com/v1/tokens", - "https://log.finalsurge.com/oauth/token", - "https://multisport.todaysplan.com.au/rest/oauth/access_token", - "https://whats.todaysplan.com.au/rest/oauth/access_token", - "https://stackoverflow.com/oauth/access_token", - "https://account.health.nokia.com", - "https://accounts.zoho.com", - "https://gitter.im/login/oauth/token", - "https://openid-connect.onelogin.com/oidc", - "https://api.dailymotion.com/oauth/token", -} +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. +func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. +type AuthStyle int -// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. -var brokenAuthHeaderDomains = []string{ - ".auth0.com", - ".force.com", - ".myshopify.com", - ".okta.com", - ".oktapreview.com", +const ( + AuthStyleUnknown AuthStyle = 0 + AuthStyleInParams AuthStyle = 1 + AuthStyleInHeader AuthStyle = 2 +) + +// authStyleCache is the set of tokenURLs we've successfully used via +// RetrieveToken and which style auth we ended up using. +// It's called a cache, but it doesn't (yet?) shrink. It's expected that +// the set of OAuth2 servers a program contacts over time is fixed and +// small. +var authStyleCache struct { + sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +// ResetAuthCache resets the global authentication style cache used +// for AuthStyleUnknown token requests. +func ResetAuthCache() { + authStyleCache.Lock() + defer authStyleCache.Unlock() + authStyleCache.m = nil } -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } +// lookupAuthStyle reports which auth style we last used with tokenURL +// when calling RetrieveToken and whether we have ever done so. +func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + style, ok = authStyleCache.m[tokenURL] + return +} - if u, err := url.Parse(tokenURL); err == nil { - for _, s := range brokenAuthHeaderDomains { - if strings.HasSuffix(u.Host, s) { - return false - } - } +// setAuthStyle adds an entry to authStyleCache, documented above. 
+func setAuthStyle(tokenURL string, v AuthStyle) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + if authStyleCache.m == nil { + authStyleCache.m = make(map[string]AuthStyle) } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. - return true + authStyleCache.m[tokenURL] = v } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { - bustedAuth := !providerAuthHeaderWorks(tokenURL) - if bustedAuth { +// newTokenRequest returns a new *http.Request to retrieve a new token +// from tokenURL using the provided clientID, clientSecret, and POST +// body parameters. +// +// inParams is whether the clientID & clientSecret should be encoded +// as the POST body. An 'inParams' value of true means to send it in +// the POST body (along with any values in v); false means to send it +// in the Authorization header. +func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { + if authStyle == AuthStyleInParams { + v = cloneURLValues(v) if clientID != "" { v.Set("client_id", clientID) } @@ -198,15 +175,70 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { + if authStyle == AuthStyleInHeader { req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) } + return req, nil +} + +func cloneURLValues(v url.Values) url.Values { + v2 := make(url.Values, len(v)) + for k, vv := range v { + v2[k] = append([]string(nil), vv...) + } + return v2 +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { + needsAuthStyleProbe := authStyle == 0 + if needsAuthStyleProbe { + if style, ok := lookupAuthStyle(tokenURL); ok { + authStyle = style + needsAuthStyleProbe = false + } else { + authStyle = AuthStyleInHeader // the first way we'll try + } + } + req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + if err != nil { + return nil, err + } + token, err := doTokenRoundTrip(ctx, req) + if err != nil && needsAuthStyleProbe { + // If we get an error, assume the server wants the + // clientID & clientSecret in a different form. + // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. + // In summary: + // - Reddit only accepts client secret in the Authorization header + // - Dropbox accepts either it in URL param or Auth header, but not both. + // - Google only accepts URL param (not spec compliant?), not Auth header + // - Stripe only accepts client secret in Auth header with Bearer method, not Basic + // + // We used to maintain a big table in this code of all the sites and which way + // they went, but maintaining it didn't scale & got annoying. + // So just try both ways. + authStyle = AuthStyleInParams // the second way we'll try + req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + token, err = doTokenRoundTrip(ctx, req) + } + if needsAuthStyleProbe && err == nil { + setAuthStyle(tokenURL, authStyle) + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. 
+ if token != nil && token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, err +} + +func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) if err != nil { return nil, err } - defer r.Body.Close() body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + r.Body.Close() if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } @@ -232,7 +264,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, Raw: vals, } e := vals.Get("expires_in") - if e == "" { + if e == "" || e == "null" { // TODO(jbd): Facebook's OAuth2 implementation is broken and // returns expires_in field in expires. Remove the fallback to expires, // when Facebook fixes their implementation. @@ -256,13 +288,8 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } if token.AccessToken == "" { - return token, errors.New("oauth2: server response missing access_token") + return nil, errors.New("oauth2: server response missing access_token") } return token, nil } diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 683d2d271a..0000000000 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides a partial implementation -// of JSON Web Signature encoding and decoding. -// It exists to support the golang.org/x/oauth2 package. -// -// See RFC 7515. -// -// Deprecated: this package is not intended for public use and might be -// removed in the future. It exists for internal use only. -// Please switch to another JWS package or copy this package into your own -// source tree. -package jws // import "golang.org/x/oauth2/jws" - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - // complaint with legacy OAuth 2.0 providers. 
(Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64.RawURLEncoding.EncodeToString(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` - - // The optional hint of which key is being used. - KeyID string `json:"kid,omitempty"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64.RawURLEncoding.EncodeToString(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error. - return nil, errors.New("jws: invalid token received") - } - decoded, err := base64.RawURLEncoding.DecodeString(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. 
-func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// Verify tests whether the provided JWT token's signature was produced by the private key -// associated with the supplied public key. -func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - - signedContent := parts[0] + "." + parts[1] - signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) - if err != nil { - return err - } - - h := sha256.New() - h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) -} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index 0783a94c48..0000000000 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // PrivateKeyID contains an optional hint indicating which key is being - // used. - PrivateKeyID string - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. - Expires time.Duration -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. 
-func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. - claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - h := *defaultHeader - h.KeyID = js.conf.PrivateKeyID - payload, err := jws.Encode(&h, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, &oauth2.RetrieveError{ - Response: resp, - Body: body, - } - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 3de63315b8..428283f0b0 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -26,17 +26,13 @@ import ( // Deprecated: Use context.Background() or context.TODO() instead. var NoContext = context.TODO() -// RegisterBrokenAuthHeaderProvider registers an OAuth2 server -// identified by the tokenURL prefix as an OAuth2 implementation -// which doesn't support the HTTP Basic authentication -// scheme to authenticate with the authorization server. -// Once a server is registered, credentials (client_id and client_secret) -// will be passed as parameters in the request body rather than being present -// in the Authorization header. -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
-func RegisterBrokenAuthHeaderProvider(tokenURL string) { - internal.RegisterBrokenAuthHeaderProvider(tokenURL) -} +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. +func RegisterBrokenAuthHeaderProvider(tokenURL string) {} // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. @@ -71,13 +67,38 @@ type TokenSource interface { Token() (*Token, error) } -// Endpoint contains the OAuth 2.0 provider's authorization and token +// Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { AuthURL string TokenURL string + + // AuthStyle optionally specifies how the endpoint wants the + // client ID & client secret sent. The zero value means to + // auto-detect. + AuthStyle AuthStyle } +// AuthStyle represents how requests for tokens are authenticated +// to the server. +type AuthStyle int + +const ( + // AuthStyleAutoDetect means to auto-detect which authentication + // style the provider wants by trying both ways and caching + // the successful way for the future. + AuthStyleAutoDetect AuthStyle = 0 + + // AuthStyleInParams sends the "client_id" and "client_secret" + // in the POST body as application/x-www-form-urlencoded parameters. + AuthStyleInParams AuthStyle = 1 + + // AuthStyleInHeader sends the client_id and client_password + // using HTTP Basic Authorization. This is an optional style + // described in the OAuth2 RFC 6749 section 2.3.1. + AuthStyleInHeader AuthStyle = 2 +) + var ( // AccessTypeOnline and AccessTypeOffline are options passed // to the Options.AuthCodeURL method. They modify the @@ -124,7 +145,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challange. +// It can also be used to pass the PKCE challenge. // See https://www.oauth.com/oauth2-servers/pkce/ for more info. func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index ee4be545fe..822720341a 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -154,7 +154,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS deleted file mode 100644 index 15167cd746..0000000000 --- a/vendor/golang.org/x/sync/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
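Illustrating the Endpoint.AuthStyle field introduced in the oauth2.go hunk above, which replaces the removed broken-auth-header tables; the provider URLs and client values here are placeholders.

package main

import "golang.org/x/oauth2"

var conf = &oauth2.Config{
	ClientID:     "client-id",         // placeholder
	ClientSecret: "client-secret",     // placeholder
	Scopes:       []string{"profile"}, // placeholder
	Endpoint: oauth2.Endpoint{
		AuthURL:  "https://provider.example.com/oauth/authorize",
		TokenURL: "https://provider.example.com/oauth/token",
		// Declaring the style up front avoids the header-vs-params probe
		// that AuthStyleAutoDetect performs on the first token request.
		AuthStyle: oauth2.AuthStyleInParams,
	},
}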
diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS deleted file mode 100644 index 1c4577e968..0000000000 --- a/vendor/golang.org/x/sync/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041f..0000000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go deleted file mode 100644 index ac53e733e7..0000000000 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semaphore provides a weighted semaphore implementation. -package semaphore // import "golang.org/x/sync/semaphore" - -import ( - "container/list" - "context" - "sync" -) - -type waiter struct { - n int64 - ready chan<- struct{} // Closed when semaphore acquired. -} - -// NewWeighted creates a new weighted semaphore with the given -// maximum combined weight for concurrent access. -func NewWeighted(n int64) *Weighted { - w := &Weighted{size: n} - return w -} - -// Weighted provides a way to bound concurrent access to a resource. -// The callers can request access with a given weight. -type Weighted struct { - size int64 - cur int64 - mu sync.Mutex - waiters list.List -} - -// Acquire acquires the semaphore with a weight of n, blocking until resources -// are available or ctx is done. On success, returns nil. On failure, returns -// ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. 
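A brief sketch of bounding concurrency with the weighted semaphore documented above; the limit of three and the ten workers are arbitrary.

package main

import (
	"context"
	"fmt"
	"log"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most three workers run at once
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			log.Fatal(err)
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("worker", i)
		}(i)
	}
	wg.Wait()
}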
-func (s *Weighted) Acquire(ctx context.Context, n int64) error { - s.mu.Lock() - if s.size-s.cur >= n && s.waiters.Len() == 0 { - s.cur += n - s.mu.Unlock() - return nil - } - - if n > s.size { - // Don't make other Acquire calls block on one that's doomed to fail. - s.mu.Unlock() - <-ctx.Done() - return ctx.Err() - } - - ready := make(chan struct{}) - w := waiter{n: n, ready: ready} - elem := s.waiters.PushBack(w) - s.mu.Unlock() - - select { - case <-ctx.Done(): - err := ctx.Err() - s.mu.Lock() - select { - case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil - default: - s.waiters.Remove(elem) - } - s.mu.Unlock() - return err - - case <-ready: - return nil - } -} - -// TryAcquire acquires the semaphore with a weight of n without blocking. -// On success, returns true. On failure, returns false and leaves the semaphore unchanged. -func (s *Weighted) TryAcquire(n int64) bool { - s.mu.Lock() - success := s.size-s.cur >= n && s.waiters.Len() == 0 - if success { - s.cur += n - } - s.mu.Unlock() - return success -} - -// Release releases the semaphore with a weight of n. -func (s *Weighted) Release(n int64) { - s.mu.Lock() - s.cur -= n - if s.cur < 0 { - s.mu.Unlock() - panic("semaphore: bad release") - } - for { - next := s.waiters.Front() - if next == nil { - break // No more waiters blocked. - } - - w := next.Value.(waiter) - if s.size-s.cur < w.n { - // Not enough tokens for the next waiter. We could keep going (to try to - // find a waiter with a smaller request), but under load that could cause - // starvation for large requests; instead, we leave all remaining waiters - // blocked. - // - // Consider a semaphore used as a read-write lock, with N tokens, N - // readers, and one writer. Each reader can Acquire(1) to obtain a read - // lock. The writer can Acquire(N) to obtain a write lock, excluding all - // of the readers. If we allow the readers to jump ahead in the queue, - // the writer will starve — there is always one token available for every - // reader. - break - } - - s.cur += w.n - s.waiters.Remove(next) - close(w.ready) - } - s.mu.Unlock() -} diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s deleted file mode 100644 index 6db717de53..0000000000 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build riscv64,!gccgo - -#include "textflag.h" - -// -// System calls for linux/riscv64. -// -// Where available, just jump to package syscall's implementation of -// these functions. 
- -TEXT ·Syscall(SB),NOSPLIT,$0-56 - JMP syscall·Syscall(SB) - -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - JMP syscall·Syscall6(SB) - -TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 - CALL runtime·entersyscall(SB) - MOV a1+8(FP), A0 - MOV a2+16(FP), A1 - MOV a3+24(FP), A2 - MOV $0, A3 - MOV $0, A4 - MOV $0, A5 - MOV $0, A6 - MOV trap+0(FP), A7 // syscall entry - ECALL - MOV A0, r1+32(FP) // r1 - MOV A1, r2+40(FP) // r2 - CALL runtime·exitsyscall(SB) - RET - -TEXT ·RawSyscall(SB),NOSPLIT,$0-56 - JMP syscall·RawSyscall(SB) - -TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 - JMP syscall·RawSyscall6(SB) - -TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 - MOV a1+8(FP), A0 - MOV a2+16(FP), A1 - MOV a3+24(FP), A2 - MOV ZERO, A3 - MOV ZERO, A4 - MOV ZERO, A5 - MOV trap+0(FP), A7 // syscall entry - ECALL - MOV A0, r1+32(FP) - MOV A1, r2+40(FP) - RET diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 5a22eca967..c6516f2b7c 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -105,25 +105,25 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; netbsd_386) @@ -146,12 +146,6 @@ netbsd_arm) # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; -netbsd_arm64) - mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -netbsd" - mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" - ;; openbsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32 -openbsd" diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 4548b993db..0000000000 --- a/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. 
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 4c91159c12..b3c33c2c32 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -182,7 +182,6 @@ struct ltchars { #include #include #include -#include #include #include #include @@ -434,7 +433,7 @@ ccflags="$@" $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 == "ICMPV6_FILTER" || @@ -467,7 +466,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index 4d5b531b50..0000000000 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. 
-package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - if goos == "aix" { - // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t - // to avoid having both StTimespec and Timespec. - sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) - b = sttimespec.ReplaceAll(b, []byte("Timespec")) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. - ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // Rename Stat_t time fields - if goos == "freebsd" && goarch == "386" { - // Hide Stat_t.[AMCB]tim_ext fields - renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) - b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) - } - renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) - b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index e4af9424e9..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. 
-*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index 3be3cdfc3b..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. 
- call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) - } else { - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - } - - // Assign return values. - body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index c960099517..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) 
(r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. 
- // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - cExtern += "#define c_select select\n" - } - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. 
- dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - var callgccgo string - if sysname == "select" { - // select is a keyword of Go. Its name is - // changed to c_select. - callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) - } else { - callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - } - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738b6..0000000000 --- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go deleted file mode 100644 index b6b409909c..0000000000 --- a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go +++ /dev/null @@ -1,355 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. -// -// Build a MIB with each entry being an array containing the level, type and -// a hash that will contain additional entries if the current entry is a node. -// We then walk this MIB and create a flattened sysctl name to OID hash. - -package main - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "regexp" - "sort" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments. -func cmdLine() string { - return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags. -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -// reMatch performs regular expression match and stores the substring slice to value pointed by m. -func reMatch(re *regexp.Regexp, str string, m *[]string) bool { - *m = re.FindStringSubmatch(str) - if *m != nil { - return true - } - return false -} - -type nodeElement struct { - n int - t string - pE *map[string]nodeElement -} - -var ( - debugEnabled bool - mib map[string]nodeElement - node *map[string]nodeElement - nodeMap map[string]string - sysCtl []string -) - -var ( - ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) - ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) - ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) - netInetRE = regexp.MustCompile(`^netinet/`) - netInet6RE = regexp.MustCompile(`^netinet6/`) - netRE = regexp.MustCompile(`^net/`) - bracesRE = regexp.MustCompile(`{.*}`) - ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) - fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) -) - -func debug(s string) { - if debugEnabled { - fmt.Fprintln(os.Stderr, s) - } -} - -// Walk the MIB and build a sysctl name to OID mapping. -func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { - lNode := pNode // local copy of pointer to node - var keys []string - for k := range *lNode { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, key := range keys { - nodename := name - if name != "" { - nodename += "." 
- } - nodename += key - - nodeoid := append(oid, (*pNode)[key].n) - - if (*pNode)[key].t == `CTLTYPE_NODE` { - if _, ok := nodeMap[nodename]; ok { - lNode = &mib - ctlName := nodeMap[nodename] - for _, part := range strings.Split(ctlName, ".") { - lNode = ((*lNode)[part]).pE - } - } else { - lNode = (*pNode)[key].pE - } - buildSysctl(lNode, nodename, nodeoid) - } else if (*pNode)[key].t != "" { - oidStr := []string{} - for j := range nodeoid { - oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) - } - text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" - sysCtl = append(sysCtl, text) - } - } -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - mib = make(map[string]nodeElement) - headers := [...]string{ - `sys/sysctl.h`, - `sys/socket.h`, - `sys/tty.h`, - `sys/malloc.h`, - `sys/mount.h`, - `sys/namei.h`, - `sys/sem.h`, - `sys/shm.h`, - `sys/vmmeter.h`, - `uvm/uvmexp.h`, - `uvm/uvm_param.h`, - `uvm/uvm_swap_encrypt.h`, - `ddb/db_var.h`, - `net/if.h`, - `net/if_pfsync.h`, - `net/pipex.h`, - `netinet/in.h`, - `netinet/icmp_var.h`, - `netinet/igmp_var.h`, - `netinet/ip_ah.h`, - `netinet/ip_carp.h`, - `netinet/ip_divert.h`, - `netinet/ip_esp.h`, - `netinet/ip_ether.h`, - `netinet/ip_gre.h`, - `netinet/ip_ipcomp.h`, - `netinet/ip_ipip.h`, - `netinet/pim_var.h`, - `netinet/tcp_var.h`, - `netinet/udp_var.h`, - `netinet6/in6.h`, - `netinet6/ip6_divert.h`, - `netinet6/pim6_var.h`, - `netinet/icmp6.h`, - `netmpls/mpls.h`, - } - - ctls := [...]string{ - `kern`, - `vm`, - `fs`, - `net`, - //debug /* Special handling required */ - `hw`, - //machdep /* Arch specific */ - `user`, - `ddb`, - //vfs /* Special handling required */ - `fs.posix`, - `kern.forkstat`, - `kern.intrcnt`, - `kern.malloc`, - `kern.nchstats`, - `kern.seminfo`, - `kern.shminfo`, - `kern.timecounter`, - `kern.tty`, - `kern.watchdog`, - `net.bpf`, - `net.ifq`, - `net.inet`, - `net.inet.ah`, - `net.inet.carp`, - `net.inet.divert`, - `net.inet.esp`, - `net.inet.etherip`, - `net.inet.gre`, - `net.inet.icmp`, - `net.inet.igmp`, - `net.inet.ip`, - `net.inet.ip.ifq`, - `net.inet.ipcomp`, - `net.inet.ipip`, - `net.inet.mobileip`, - `net.inet.pfsync`, - `net.inet.pim`, - `net.inet.tcp`, - `net.inet.udp`, - `net.inet6`, - `net.inet6.divert`, - `net.inet6.ip6`, - `net.inet6.icmp6`, - `net.inet6.pim6`, - `net.inet6.tcp6`, - `net.inet6.udp6`, - `net.mpls`, - `net.mpls.ifq`, - `net.key`, - `net.pflow`, - `net.pfsync`, - `net.pipex`, - `net.rt`, - `vm.swapencrypt`, - //vfsgenctl /* Special handling required */ - } - - // Node name "fixups" - ctlMap := map[string]string{ - "ipproto": "net.inet", - "net.inet.ipproto": "net.inet", - "net.inet6.ipv6proto": "net.inet6", - "net.inet6.ipv6": "net.inet6.ip6", - "net.inet.icmpv6": "net.inet6.icmp6", - "net.inet6.divert6": "net.inet6.divert", - "net.inet6.tcp6": "net.inet.tcp", - "net.inet6.udp6": "net.inet.udp", - "mpls": "net.mpls", - "swpenc": "vm.swapencrypt", - } - - // Node mappings - nodeMap = map[string]string{ - "net.inet.ip.ifq": "net.ifq", - "net.inet.pfsync": "net.pfsync", - "net.mpls.ifq": "net.ifq", - } - - mCtls := 
make(map[string]bool) - for _, ctl := range ctls { - mCtls[ctl] = true - } - - for _, header := range headers { - debug("Processing " + header) - file, err := os.Open(filepath.Join("/usr/include", header)) - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - var sub []string - if reMatch(ctlNames1RE, s.Text(), &sub) || - reMatch(ctlNames2RE, s.Text(), &sub) || - reMatch(ctlNames3RE, s.Text(), &sub) { - if sub[1] == `CTL_NAMES` { - // Top level. - node = &mib - } else { - // Node. - nodename := strings.ToLower(sub[2]) - ctlName := "" - if reMatch(netInetRE, header, &sub) { - ctlName = "net.inet." + nodename - } else if reMatch(netInet6RE, header, &sub) { - ctlName = "net.inet6." + nodename - } else if reMatch(netRE, header, &sub) { - ctlName = "net." + nodename - } else { - ctlName = nodename - ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) - } - - if val, ok := ctlMap[ctlName]; ok { - ctlName = val - } - if _, ok := mCtls[ctlName]; !ok { - debug("Ignoring " + ctlName + "...") - continue - } - - // Walk down from the top of the MIB. - node = &mib - for _, part := range strings.Split(ctlName, ".") { - if _, ok := (*node)[part]; !ok { - debug("Missing node " + part) - (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} - } - node = (*node)[part].pE - } - } - - // Populate current node with entries. - i := -1 - for !strings.HasPrefix(s.Text(), "}") { - s.Scan() - if reMatch(bracesRE, s.Text(), &sub) { - i++ - } - if !reMatch(ctlTypeRE, s.Text(), &sub) { - continue - } - (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} - } - } - } - err = s.Err() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - file.Close() - } - buildSysctl(&mib, "", []int{}) - - sort.Strings(sysCtl) - text := strings.Join(sysCtl, "") - - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; DO NOT EDIT. - -// +build %s - -package unix - -type mibentry struct { - ctlname string - ctloid []_C_int -} - -var sysctlMib = []mibentry { -%s -} -` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index baa6ecd850..0000000000 --- a/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). 
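As a hedged illustration of the transformation this generator performs (the input line below is representative of a FreeBSD syscalls.master entry, not quoted from a specific release), an entry such as

	3	AUE_NULL	STD	{ ssize_t read(int fd, void *buf, size_t nbyte); }

is matched by the per-GOOS regular expressions further down and emitted, via format(), as an uppercased constant with the prototype kept as a comment:

	SYS_READ = 3; // { ssize_t read(int fd, void *buf, size_t nbyte); }

All emitted lines are then wrapped in a single const ( ... ) block in package unix by the template at the end of the file.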
-package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { 
- name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 062bcabab1..723b7f1012 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -21,10 +21,10 @@ func cmsgAlignOf(salen int) int { case "aix": // There is no alignment on AIX. salign = 1 - case "darwin", "dragonfly", "solaris", "illumos": - // NOTE: It seems like 64-bit Darwin, DragonFly BSD, - // illumos, and Solaris kernels still require 32-bit - // aligned access to network subsystem. + case "darwin", "dragonfly", "solaris": + // NOTE: It seems like 64-bit Darwin, DragonFly BSD and + // Solaris kernels still require 32-bit aligned access to + // network subsystem. if SizeofPtr == 8 { salign = 4 } diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index fd4ee8ebeb..0d4b1d7a20 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -50,4 +50,5 @@ func BytePtrFromString(s string) (*byte, error) { } // Single-word zero for use when we need a valid pointer to 0 bytes. +// See mkunix.pl. 
var _zero uintptr diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 45e12fb8a8..c1fb7bd16e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -454,8 +454,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sys Dup2(oldfd int, newfd int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 //sys Fchown(fd int, uid int, gid int) (err error) -//sys fstat(fd int, stat *Stat_t) (err error) -//sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat //sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) @@ -464,7 +464,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sysnb Getuid() (uid int) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) -//sys lstat(path string, stat *Stat_t) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) //sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64 @@ -474,7 +474,7 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) -//sys stat(path string, statptr *Stat_t) (err error) +//sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) //sys Truncate(path string, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index bf05603f15..c28af1f86e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -32,19 +32,3 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } - -func Fstat(fd int, stat *Stat_t) error { - return fstat(fd, stat) -} - -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error { - return fstatat(dirfd, path, stat, flags) -} - -func Lstat(path string, stat *Stat_t) error { - return lstat(path, stat) -} - -func Stat(path string, statptr *Stat_t) error { - return stat(path, statptr) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 13d4321f4c..881cacc6cc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -32,50 +32,3 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } - -// In order to only have Timespec structure, type of Stat_t's fields -// Atim, Mtim and Ctim is changed from StTimespec to Timespec during -// ztypes generation. -// On ppc64, Timespec.Nsec is an int64 while StTimespec.Nsec is an -// int32, so the fields' value must be modified. 
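A brief hedged sketch of why the >>= 32 shifts below recover the original values, assuming the big-endian layout used by AIX on ppc64 and that the kernel's 32-bit tv_nsec occupies the leading (high-order) four bytes of the widened int64 field: read as an int64, the value comes back shifted left by 32, so shifting it right by 32 restores it.

package main

import "fmt"

func main() {
	// Illustrative only: a 32-bit nanosecond count sitting in the high half
	// of an int64 field reads back as value<<32 until shifted down again.
	raw := int64(123456789) << 32
	fmt.Println(raw >> 32) // 123456789, the original tv_nsec
}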
-func fixStatTimFields(stat *Stat_t) { - stat.Atim.Nsec >>= 32 - stat.Mtim.Nsec >>= 32 - stat.Ctim.Nsec >>= 32 -} - -func Fstat(fd int, stat *Stat_t) error { - err := fstat(fd, stat) - if err != nil { - return err - } - fixStatTimFields(stat) - return nil -} - -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error { - err := fstatat(dirfd, path, stat, flags) - if err != nil { - return err - } - fixStatTimFields(stat) - return nil -} - -func Lstat(path string, stat *Stat_t) error { - err := lstat(path, stat) - if err != nil { - return err - } - fixStatTimFields(stat) - return nil -} - -func Stat(path string, statptr *Stat_t) error { - err := stat(path, statptr) - if err != nil { - return err - } - fixStatTimFields(statptr) - return nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 1b6abe123b..a7ca1ebea3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -362,21 +362,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { if supportsABI(_ino64First) { - if unsafe.Sizeof(*basep) == 8 { - return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) - } - // The freebsd12 syscall needs a 64-bit base. On 32-bit machines - // we can't just use the basep passed in. See #32498. - var base uint64 = uint64(*basep) - n, err = getdirentries_freebsd12(fd, buf, &base) - *basep = uintptr(base) - if base>>32 != 0 { - // We can't stuff the base back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return + return getdirentries_freebsd12(fd, buf, basep) } // The old syscall entries are smaller than the new. 
Use 1/4 of the original @@ -418,22 +404,22 @@ func roundup(x, y int) int { func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Btim: old.Btim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), + Dev: uint64(old.Dev), + Ino: uint64(old.Ino), + Nlink: uint64(old.Nlink), + Mode: old.Mode, + Uid: old.Uid, + Gid: old.Gid, + Rdev: uint64(old.Rdev), + Atim: old.Atim, + Mtim: old.Mtim, + Ctim: old.Ctim, + Birthtim: old.Birthtim, + Size: old.Size, + Blocks: old.Blocks, + Blksize: old.Blksize, + Flags: old.Flags, + Gen: uint64(old.Gen), } } @@ -569,7 +555,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) +//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index c92545ea51..c302f01b2e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -109,12 +109,6 @@ func IoctlGetInt(fd int, req uint) (int, error) { return value, err } -func IoctlGetUint32(fd int, req uint) (uint32, error) { - var value uint32 - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -1537,13 +1531,9 @@ func Setgid(uid int) (err error) { return EOPNOTSUPP } -func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { - return signalfd(fd, sigmask, _C__NSIG/8, flags) -} - //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) -//sys signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) = SYS_SIGNALFD4 +//sys Signalfd(fd int, mask *Sigset_t, flags int) = SYS_SIGNALFD4 //sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() //sys Syncfs(fd int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index f626794439..3a3c37b4c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -272,16 +272,3 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error { // order of their arguments. return armSyncFileRange(fd, flags, off, n) } - -//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) - -func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { - cmdlineLen := len(cmdline) - if cmdlineLen > 0 { - // Account for the additional NULL byte added by - // BytePtrFromString in kexecFileLoad. The kexec_file_load - // syscall expects a NULL-terminated string. 
- cmdlineLen++ - } - return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 8f4c320eba..5240e16e4b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -120,30 +120,9 @@ func Pipe(p []int) (err error) { return } -//sys Getdents(fd int, buf []byte) (n int, err error) +//sys getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - n, err = Getdents(fd, buf) - if err != nil || basep == nil { - return - } - - var off int64 - off, err = Seek(fd, 0, 1 /* SEEK_CUR */) - if err != nil { - *basep = ^uintptr(0) - return - } - *basep = uintptr(off) - if unsafe.Sizeof(*basep) == 8 { - return - } - if off>>4 != 0 { - // We can't stuff the offset back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return + return getdents(fd, buf) } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 276c93bef4..c8648ec026 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -89,30 +89,9 @@ func Pipe(p []int) (err error) { return } -//sys Getdents(fd int, buf []byte) (n int, err error) +//sys getdents(fd int, buf []byte) (n int, err error) func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - n, err = Getdents(fd, buf) - if err != nil || basep == nil { - return - } - - var off int64 - off, err = Seek(fd, 0, 1 /* SEEK_CUR */) - if err != nil { - *basep = ^uintptr(0) - return - } - *basep = uintptr(off) - if unsafe.Sizeof(*basep) == 8 { - return - } - if off>>4 != 0 { - // We can't stuff the offset back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO was allowed by getdirentries. - err = EIO - } - return + return getdents(fd, buf) } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 40d2beede5..0000000000 --- a/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - 
POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e692b..0000000000 --- a/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = 
C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79d0..0000000000 --- a/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index 7470798951..0000000000 --- a/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data 
- -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 2dd4f9542c..0000000000 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 8aafbe4469..0000000000 --- a/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f9348..0000000000 --- a/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 881e69f128..d180147f79 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - 
BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1136,15 +1056,6 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1580,7 +1491,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2051,7 +1961,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2100,8 +2009,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2113,17 +2020,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2216,8 +2115,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2419,10 +2316,8 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2571,7 +2466,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 039b007d7b..1d277a412d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 
BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1136,15 +1056,6 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 
- MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1580,7 +1491,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2052,7 +1962,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2101,8 +2010,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2114,17 +2021,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2217,8 +2116,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2420,10 +2317,8 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2571,7 +2466,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 97ed569a2a..63ac45af81 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ 
const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2058,7 +1968,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2107,8 +2016,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2120,17 +2027,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2223,8 +2122,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2426,10 +2323,8 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL 
= 0x40046f01 UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2577,7 +2472,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d47f3ba6a1..81d3259edb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -573,7 +503,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -582,12 +511,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 
FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -601,10 +526,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -613,7 +534,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1137,15 +1057,6 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1581,7 +1492,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2042,7 +1952,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2091,8 +2000,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2104,17 +2011,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2208,8 +2107,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2411,10 +2308,8 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2562,7 +2457,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0ae030ee4e..58fc1eb9c8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff 
- BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2051,7 +1961,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2100,8 +2009,6 @@ const ( SO_RCVBUFFORCE = 
0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2113,18 +2020,10 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2216,8 +2115,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2421,10 +2318,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2573,7 +2468,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 91b49dddbe..67336da6cd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR 
= 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2051,7 +1961,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2100,8 +2009,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2113,18 +2020,10 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2216,8 +2115,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2421,10 +2318,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2573,7 +2468,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING 
= 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 7f1ef04eba..af030dcbb0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 
0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2051,7 +1961,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2100,8 +2009,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2113,18 +2020,10 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2216,8 +2115,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2421,10 +2318,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2573,7 +2468,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 724a244fd3..be225da7d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - 
BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x40000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2051,7 +1961,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x20 SO_BSDCOMPAT = 0xe @@ -2100,8 +2009,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x28 @@ -2113,18 +2020,10 @@ const ( SO_SNDBUFFORCE = 0x1f SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 SO_STYLE = 0x1008 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - 
SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2216,8 +2115,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2421,10 +2318,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2573,7 +2468,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 250446292b..fcbc70173a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 
0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1133,15 +1053,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1580,7 +1491,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2109,7 +2019,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2158,8 +2067,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVTIMEO = 0x12 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x12 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2171,17 +2078,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x11 SO_SNDTIMEO = 0x13 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x13 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2272,8 +2171,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2481,10 +2378,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2632,7 +2527,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e7c49911b4..5cd3b4ed0e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY 
= 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1133,15 +1053,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - 
MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1580,7 +1491,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2109,7 +2019,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2158,8 +2067,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVTIMEO = 0x12 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x12 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2171,17 +2078,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x11 SO_SNDTIMEO = 0x13 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x13 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2272,8 +2171,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2481,10 +2378,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2632,7 +2527,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0373d65ae7..0465451e88 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND 
= 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2039,7 +1949,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2088,8 +1997,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2101,17 +2008,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2204,8 +2103,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2407,10 +2304,8 @@ const ( 
UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2558,7 +2453,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index b2ed7ee6a1..e1592a17a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -197,59 +197,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -257,16 +208,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -280,33 +223,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -571,7 +501,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -580,12 +509,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - 
FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -599,10 +524,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -611,7 +532,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1134,15 +1054,6 @@ const ( MAP_STACK = 0x20000 MAP_SYNC = 0x80000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 @@ -1578,7 +1489,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2112,7 +2022,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x33 SO_ATTACH_REUSEPORT_EBPF = 0x34 SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e SO_BPF_EXTENSIONS = 0x30 SO_BROADCAST = 0x6 SO_BSDCOMPAT = 0xe @@ -2161,8 +2070,6 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 SO_REUSEADDR = 0x2 SO_REUSEPORT = 0xf SO_RXQ_OVFL = 0x28 @@ -2174,17 +2081,9 @@ const ( SO_SNDBUFFORCE = 0x20 SO_SNDLOWAT = 0x13 SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2277,8 +2176,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2480,10 +2377,8 @@ const ( UBI_IOCMKVOL = 0x40986f00 UBI_IOCRMVOL = 0x40046f01 UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 UBI_IOCRSVOL = 0x400c6f02 UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 UBI_IOCVOLCRBLK = 0x40804f07 UBI_IOCVOLRMBLK = 0x4f08 UBI_IOCVOLUP = 0x40084f00 @@ -2631,7 +2526,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 58067c529a..4be507343c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -200,59 +200,10 @@ const ( BPF_ABS = 0x20 BPF_ADD = 0x0 BPF_ALU = 0x4 - BPF_ALU64 = 0x7 BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 
0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -260,16 +211,8 @@ const ( BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 BPF_K = 0x0 BPF_LD = 0x0 BPF_LDX = 0x1 @@ -283,33 +226,20 @@ const ( BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 BPF_MOD = 0x90 - BPF_MOV = 0xb0 BPF_MSH = 0xa0 BPF_MUL = 0x20 BPF_NEG = 0x80 BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 BPF_RET = 0x6 BPF_RSH = 0x70 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 BPF_TXA = 0x80 BPF_W = 0x0 BPF_X = 0x8 - BPF_XADD = 0xc0 BPF_XOR = 0xa0 BRKINT = 0x2 BS0 = 0x0 @@ -575,7 +505,6 @@ const ( FAN_ALL_MARK_FLAGS = 0xff FAN_ALL_OUTGOING_EVENTS = 0x3403b FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 FAN_AUDIT = 0x10 FAN_CLASS_CONTENT = 0x4 FAN_CLASS_NOTIF = 0x0 @@ -584,12 +513,8 @@ const ( FAN_CLOSE = 0x18 FAN_CLOSE_NOWRITE = 0x10 FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_MARK_ADD = 0x1 @@ -603,10 +528,6 @@ const ( FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 FAN_NOFD = -0x1 FAN_NONBLOCK = 0x2 FAN_ONDIR = 0x40000000 @@ -615,7 +536,6 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 @@ -1138,15 +1058,6 @@ const ( MAP_SHARED_VALIDATE = 0x3 MAP_STACK = 0x20000 MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 @@ -1582,7 +1493,6 @@ const ( PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 PR_SPEC_FORCE_DISABLE = 0x8 PR_SPEC_INDIRECT_BRANCH = 0x1 @@ -2104,7 +2014,6 @@ const ( SO_ATTACH_REUSEPORT_CBPF = 0x35 SO_ATTACH_REUSEPORT_EBPF = 0x36 SO_BINDTODEVICE = 0xd - SO_BINDTOIFINDEX = 0x41 SO_BPF_EXTENSIONS = 0x32 SO_BROADCAST = 0x20 SO_BSDCOMPAT 
= 0x400 @@ -2153,8 +2062,6 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVTIMEO = 0x2000 - SO_RCVTIMEO_NEW = 0x44 - SO_RCVTIMEO_OLD = 0x2000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_RXQ_OVFL = 0x24 @@ -2166,17 +2073,9 @@ const ( SO_SNDBUFFORCE = 0x100a SO_SNDLOWAT = 0x1000 SO_SNDTIMEO = 0x4000 - SO_SNDTIMEO_NEW = 0x45 - SO_SNDTIMEO_OLD = 0x4000 SO_TIMESTAMP = 0x1d SO_TIMESTAMPING = 0x23 - SO_TIMESTAMPING_NEW = 0x43 - SO_TIMESTAMPING_OLD = 0x23 SO_TIMESTAMPNS = 0x21 - SO_TIMESTAMPNS_NEW = 0x42 - SO_TIMESTAMPNS_OLD = 0x21 - SO_TIMESTAMP_NEW = 0x46 - SO_TIMESTAMP_OLD = 0x1d SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 @@ -2268,8 +2167,6 @@ const ( TCOFLUSH = 0x1 TCOOFF = 0x0 TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_CC_INFO = 0x1a TCP_CM_INQ = 0x24 TCP_CONGESTION = 0xd @@ -2469,10 +2366,8 @@ const ( UBI_IOCMKVOL = 0x80986f00 UBI_IOCRMVOL = 0x80046f01 UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 UBI_IOCRSVOL = 0x800c6f02 UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 UBI_IOCVOLCRBLK = 0x80804f07 UBI_IOCVOLRMBLK = 0x20004f08 UBI_IOCVOLUP = 0x80084f00 @@ -2620,7 +2515,6 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 - XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 XDP_RX_RING = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index ed657ff1bc..4a9e99a0e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -859,7 +859,7 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { r0, er := C.fstat(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { err = er @@ -869,7 +869,7 @@ func fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.fstatat(C.int(dirfd), C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat))), C.int(flags)) if r0 == -1 && er != nil { @@ -953,7 +953,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_t) (err error) { +func Lstat(path string, stat *Stat_t) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) r0, er := C.lstat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { @@ -1071,9 +1071,9 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, statptr *Stat_t) (err error) { +func Stat(path string, stat *Stat_t) (err error) { _p0 := uintptr(unsafe.Pointer(C.CString(path))) - r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(statptr)))) + r0, er := C.stat(C.uintptr_t(_p0), C.uintptr_t(uintptr(unsafe.Pointer(stat)))) if r0 == -1 && er != nil { err = er } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 664b293b43..c3371ddc2f 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -803,7 +803,7 @@ func Fchown(fd int, uid int, gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, e1 := callfstat(fd, uintptr(unsafe.Pointer(stat))) if e1 != 0 { err = errnoErr(e1) @@ -813,7 +813,7 @@ func fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -905,7 +905,7 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_t) (err error) { +func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1023,13 +1023,13 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, statptr *Stat_t) (err error) { +func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statptr))) + _, e1 := callstat(uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 4b3a8ad7be..4eda723234 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -941,8 +941,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, statptr, 0, 0, 0, 0) +func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_stat)), 2, _p0, stat, 0, 0, 0, 0) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index cde4dbc5f5..e5c4cbdd6c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -783,8 +783,8 @@ func callsplice(rfd int, roff uintptr, wfd int, woff uintptr, len int, flags int // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callstat(_p0 uintptr, statptr uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(statptr))) +func callstat(_p0 uintptr, stat uintptr) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.stat(C.uintptr_t(_p0), C.uintptr_t(stat))) e1 = syscall.GetErrno() return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 2707c01317..80903e47b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 8e3c0cea9a..cd250ff0e2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 641f86a034..290a9c2cb0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 68fbccf727..c6df9d2e8f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1019,7 +1019,7 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 81d90a27e4..feb3c03933 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 0c184586bf..fa0cb252a9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data 
[]byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 18ef8a6268..daec1d5d2c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } @@ -2370,18 +2366,3 @@ func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(cmdline) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 2fba25d05b..ad9820b598 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index c330f4ffa0..c82ce7d299 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 8e9e0098a1..d1b77c1934 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index c22d62607d..b8e45f98c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 700a99e979..e26c748d4e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index cec4c106c6..0a958ca0b3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd 
int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 677ef5a697..658f361e77 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 565034c54b..daff300399 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 7feb2c6b6b..caf6ea8663 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 07655c4554..369a04b57d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1381,12 +1381,8 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Signalfd(fd int, mask *Sigset_t, flags 
int) { + SyscallNoError(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(mask)), uintptr(flags)) return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 7e05826647..642db7670a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d94d076aa0..59585fee35 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index cf5bf3d054..6ec31434b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 243a9317cf..603d144334 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -389,7 +389,7 @@ func pipe() (fd1 int, fd2 int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a9532d0787..6a489fac0a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0cb9f01774..30cba4347c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { 
var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 6fc99b5494..fa1beda33e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 27878a72b8..eb58990461 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -387,7 +387,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdents(fd int, buf []byte) (n int, err error) { +func getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 9474974b65..55c3a32945 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,freebsd @@ -118,6 +118,8 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -131,6 +133,10 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -158,7 +164,6 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int 
ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -192,10 +197,13 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } + SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } + SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -228,7 +236,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -250,7 +258,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -285,6 +293,8 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx 
*umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -390,7 +400,4 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } - SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 48a7beae7b..b39be6cb8f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,freebsd @@ -118,6 +118,8 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -131,6 +133,10 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -158,7 +164,6 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, 
const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -192,10 +197,13 @@ const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } + SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } + SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -228,7 +236,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -250,7 +258,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -285,6 +293,8 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -390,7 +400,4 
@@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } - SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index 4a6dfd4a74..44ffd4ce5e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,freebsd @@ -118,6 +118,8 @@ const ( SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_FREEBSD6_PREAD = 173 // { ssize_t freebsd6_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_FREEBSD6_PWRITE = 174 // { ssize_t freebsd6_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -131,6 +133,10 @@ const ( SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } + SYS_FREEBSD6_MMAP = 197 // { caddr_t freebsd6_mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); } + SYS_FREEBSD6_LSEEK = 199 // { off_t freebsd6_lseek(int fd, int pad, off_t offset, int whence); } + SYS_FREEBSD6_TRUNCATE = 200 // { int freebsd6_truncate(char *path, int pad, off_t length); } + SYS_FREEBSD6_FTRUNCATE = 201 // { int freebsd6_ftruncate(int fd, int pad, off_t length); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -158,7 +164,6 @@ const ( SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } @@ -192,10 +197,13 @@ 
const ( SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } - SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_RETURN = 314 // { int aio_return(struct aiocb *aiocbp); } SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_OAIO_READ = 318 // { int oaio_read(struct oaiocb *aiocbp); } + SYS_OAIO_WRITE = 319 // { int oaio_write(struct oaiocb *aiocbp); } + SYS_OLIO_LISTIO = 320 // { int olio_listio(int mode, struct oaiocb * const *acb_list, int nent, struct osigevent *sig); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } @@ -228,7 +236,7 @@ const ( SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { int aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } @@ -250,7 +258,7 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int flags); } SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } @@ -285,6 +293,8 @@ const ( SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS__UMTX_LOCK = 434 // { int _umtx_lock(struct umtx *umtx); } + SYS__UMTX_UNLOCK = 435 // { int _umtx_unlock(struct umtx *umtx); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } @@ -390,7 +400,4 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { 
int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } - SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 3e51af8edd..9f21e9550e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master // Code generated by the command above; see README.md. DO NOT EDIT. // +build arm64,freebsd @@ -7,13 +7,13 @@ package unix const ( // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int - SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void + SYS_EXIT = 1 // { void sys_exit(int rval); } exit \ SYS_FORK = 2 // { int fork(void); } - SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } - SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, \ + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, \ SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } SYS_CLOSE = 6 // { int close(int fd); } - SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, \ SYS_LINK = 9 // { int link(char *path, char *link); } SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } @@ -21,20 +21,20 @@ const ( SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_OBREAK = 17 // { int obreak(char *nsize); } break \ SYS_GETPID = 20 // { pid_t getpid(void); } - SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, \ SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } SYS_SETUID = 23 // { int setuid(uid_t uid); } SYS_GETUID = 24 // { uid_t getuid(void); } SYS_GETEUID = 25 // { uid_t geteuid(void); } - SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } - SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } - SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } - SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } - SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } - SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } - SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, \ + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, \ + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, \ + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, \ + SYS_ACCEPT = 30 // { int accept(int s, \ + SYS_GETPEERNAME = 31 // { int 
getpeername(int fdes, \ + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, \ SYS_ACCESS = 33 // { int access(char *path, int amode); } SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } @@ -42,57 +42,56 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } - SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } - SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, \ + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, \ SYS_GETGID = 47 // { gid_t getgid(void); } - SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int \ SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } SYS_ACCT = 51 // { int acct(char *path); } - SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } - SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, \ + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, \ SYS_REBOOT = 55 // { int reboot(int opt); } SYS_REVOKE = 56 // { int revoke(char *path); } SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } - SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } - SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, \ + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, \ + SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args \ SYS_CHROOT = 61 // { int chroot(char *path); } - SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, \ SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int + SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise \ SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } - SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } - SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } - SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } - SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, \ + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, \ + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, \ + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, \ + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, \ SYS_GETPGRP = 81 // { int getpgrp(void); } SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } - SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct \ SYS_SWAPON = 85 // { int swapon(char *name); } - SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } + SYS_GETITIMER = 86 // { int 
getitimer(u_int which, \ SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } - SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, \ SYS_FSYNC = 95 // { int fsync(int fd); } - SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } - SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } - SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, \ + SYS_SOCKET = 97 // { int socket(int domain, int type, \ + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, \ SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } - SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } - SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, \ + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, \ SYS_LISTEN = 106 // { int listen(int s, int backlog); } - SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } - SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } - SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } - SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } - SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } - SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, \ + SYS_GETRUSAGE = 117 // { int getrusage(int who, \ + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, \ + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, \ + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, \ + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, \ SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } @@ -100,24 +99,24 @@ const ( SYS_RENAME = 128 // { int rename(char *from, char *to); } SYS_FLOCK = 131 // { int flock(int fd, int how); } SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } - SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, \ SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } - SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, \ SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } SYS_RMDIR = 137 // { int rmdir(char *path); } - SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } - SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } + SYS_UTIMES = 138 // { int utimes(char *path, \ + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, \ SYS_SETSID = 147 // { int setsid(void); } - SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, \ SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int 
addr_count, char **addrs); } SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } - SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); } - SYS_GETFH = 161 // { int getfh(char *fname, struct fhandle *fhp); } + SYS_LGETFH = 160 // { int lgetfh(char *fname, \ + SYS_GETFH = 161 // { int getfh(char *fname, \ SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } - SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } - SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } - SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } - SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, \ + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, \ + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, \ + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, \ SYS_SETFIB = 175 // { int setfib(int fibnum); } SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } SYS_SETGID = 181 // { int setgid(gid_t gid); } @@ -128,269 +127,269 @@ const ( SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } - SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int - SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } - SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, \ + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, \ + SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, \ + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, \ SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } SYS_UNDELETE = 205 // { int undelete(char *path); } SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } SYS_GETPGID = 207 // { int getpgid(pid_t pid); } - SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } - SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } - SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, \ + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, \ + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, \ SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } - SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } - SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, \ + SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, \ + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, \ SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } - SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } - SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // 
{ int clock_settime( clockid_t clock_id, const struct timespec *tp); } - SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } - SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, \ + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, \ + SYS_CLOCK_SETTIME = 233 // { int clock_settime( \ + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, \ + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, \ SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } - SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } - SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, \ + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct \ SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } - SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, \ SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } - SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( \ + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( \ + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, \ + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,\ SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } - SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, \ SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, \ SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, \ + SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, \ SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } - SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } + SYS_LUTIMES = 276 // { int lutimes(char *path, \ SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } - SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } - SYS_PWRITEV = 290 // { ssize_t 
pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } - SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, \ + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, \ + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, \ + SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, \ SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, \ SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct \ SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } - SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } - SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, \ + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, \ SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } - SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend( \ + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, \ SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } - SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } - SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } - SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, \ + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct \ + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int \ SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } SYS_SCHED_YIELD = 331 // { int sched_yield (void); } SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } - SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, \ SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } - SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, \ SYS_JAIL = 338 // { int jail(struct jail *jail); } - SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } + SYS_SIGPROCMASK = 340 // { int 
sigprocmask(int how, \ SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } - SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, siginfo_t *info, const struct timespec *timeout); } - SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } - SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } - SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } - SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } - SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } - SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } - SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } - SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } - SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } - SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } - SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } - SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const sigset_t *set, \ + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, \ + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, \ + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, \ + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, \ + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, \ + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, \ + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, \ + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, \ + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, \ + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, \ + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( \ + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( \ + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, \ + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( \ + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, \ + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, \ SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } - SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FD = 372 // { 
ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS_KEVENT = 363 // { int kevent(int fd, \ + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, \ + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, \ + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, \ SYS___SETUGID = 374 // { int __setugid(int flag); } SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } - SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, \ SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } - SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } - SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } - SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } - SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } - SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } - SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } - SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } - SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } - SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, \ + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, \ + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, \ + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, \ + SYS_KENV = 390 // { int kenv(int what, const char *name, \ + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, \ + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, \ + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, \ + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, \ + SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, \ + SYS_STATFS = 396 // { int statfs(char *path, \ SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, \ SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } - SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } - SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, \ + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, \ SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } - SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } - SYS___MAC_GET_LINK 
= 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } - SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } - SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } - SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, \ + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, \ + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, \ + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( \ + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( \ + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( \ + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, \ + SYS_SIGACTION = 416 // { int sigaction(int sig, \ + SYS_SIGRETURN = 417 // { int sigreturn( \ SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } - SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext( \ + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, \ SYS_SWAPOFF = 424 // { int swapoff(const char *name); } - SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } - SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } - SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } - SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } - SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } - SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, \ + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, \ + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, \ + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, \ + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, \ + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, \ SYS_THR_EXIT = 431 // { void thr_exit(long *state); } SYS_THR_SELF = 432 // { int thr_self(long *id); } SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } - SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct 
timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, \ + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( \ + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( \ + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, \ + SYS_THR_SUSPEND = 442 // { int thr_suspend( \ SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } - SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } - SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } + SYS_AUDIT = 445 // { int audit(const void *record, \ + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, \ SYS_GETAUID = 447 // { int getauid(uid_t *auid); } SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( \ + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( \ SYS_AUDITCTL = 453 // { int auditctl(char *path); } - SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } - SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, \ + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, \ SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } - SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, \ + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, \ + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, \ + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, \ + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, \ SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } - SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, \ SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } - SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } - SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } - SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } - SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } - SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, \ + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, \ + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, \ + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, \ + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, \ + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, \ + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, \ SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } - SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, \ SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } - SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } - SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } - SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } - SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } - SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, int flag); } - SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } - SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } - SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } - SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } - SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, \ + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, \ + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, \ + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, \ + SYS_FACCESSAT = 489 // { int faccessat(int fd, char *path, int amode, \ + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, \ + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, \ + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, \ + SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, \ + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, \ + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, \ SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t 
mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } - SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } - SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } - SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } + SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, \ + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, \ + SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, \ + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, \ + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, \ SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } - SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } - SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, \ + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, \ SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } - SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } - SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } - SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, \ + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, \ + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, \ SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } - SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, \ SYS_CAP_ENTER = 516 // { int cap_enter(void); } SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } - SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } - SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, \ + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, \ SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } - SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } - SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } - SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } - SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } - SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } - SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } - SYS_POSIX_FADVISE 
= 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } - SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } - SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } - SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long *cmds, size_t ncmds); } - SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } - SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } - SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } - SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } - SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } - SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } - SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, \ + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, \ + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, \ + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, \ + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, \ + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, \ + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, \ + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, \ + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, \ + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, \ + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, \ + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, \ + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, \ + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, \ + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, \ + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, \ + SYS_ACCEPT4 = 541 // { int accept4(int s, \ SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } - SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } - SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } - SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } - SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, \ + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, \ + SYS_FUTIMENS = 546 // { int futimens(int fd, \ + SYS_UTIMENSAT = 547 // { int utimensat(int fd, \ + SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, \ + SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, \ SYS_FDATASYNC = 550 // { int fdatasync(int fd); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 33b6e4d1af..8d17873de0 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -6,421 +6,387 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_WAITPID = 7 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_TIME = 13 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_BREAK = 17 - SYS_OLDSTAT = 18 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_UMOUNT = 22 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_STIME = 25 - SYS_PTRACE = 26 - SYS_ALARM = 27 - SYS_OLDFSTAT = 28 - SYS_PAUSE = 29 - SYS_UTIME = 30 - SYS_STTY = 31 - SYS_GTTY = 32 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_FTIME = 35 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_PROF = 44 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_SIGNAL = 48 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_LOCK = 53 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_MPX = 56 - SYS_SETPGID = 57 - SYS_ULIMIT = 58 - SYS_OLDOLDUNAME = 59 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SGETMASK = 68 - SYS_SSETMASK = 69 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRLIMIT = 76 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SELECT = 82 - SYS_SYMLINK = 83 - SYS_OLDLSTAT = 84 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_READDIR = 89 - SYS_MMAP = 90 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_PROFIL = 98 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_IOPERM = 101 - SYS_SOCKETCALL = 102 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_OLDUNAME = 109 - SYS_IOPL = 110 - SYS_VHANGUP = 111 - SYS_IDLE = 112 - SYS_VM86OLD = 113 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_IPC = 117 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_MODIFY_LDT = 123 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_CREATE_MODULE = 127 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_GET_KERNEL_SYMS = 130 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_AFS_SYSCALL = 137 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_VM86 = 166 - SYS_QUERY_MODULE = 167 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_SETRESGID = 170 - SYS_GETRESGID = 171 - SYS_PRCTL = 172 
- SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_CHOWN = 182 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_GETPMSG = 188 - SYS_PUTPMSG = 189 - SYS_VFORK = 190 - SYS_UGETRLIMIT = 191 - SYS_MMAP2 = 192 - SYS_TRUNCATE64 = 193 - SYS_FTRUNCATE64 = 194 - SYS_STAT64 = 195 - SYS_LSTAT64 = 196 - SYS_FSTAT64 = 197 - SYS_LCHOWN32 = 198 - SYS_GETUID32 = 199 - SYS_GETGID32 = 200 - SYS_GETEUID32 = 201 - SYS_GETEGID32 = 202 - SYS_SETREUID32 = 203 - SYS_SETREGID32 = 204 - SYS_GETGROUPS32 = 205 - SYS_SETGROUPS32 = 206 - SYS_FCHOWN32 = 207 - SYS_SETRESUID32 = 208 - SYS_GETRESUID32 = 209 - SYS_SETRESGID32 = 210 - SYS_GETRESGID32 = 211 - SYS_CHOWN32 = 212 - SYS_SETUID32 = 213 - SYS_SETGID32 = 214 - SYS_SETFSUID32 = 215 - SYS_SETFSGID32 = 216 - SYS_PIVOT_ROOT = 217 - SYS_MINCORE = 218 - SYS_MADVISE = 219 - SYS_GETDENTS64 = 220 - SYS_FCNTL64 = 221 - SYS_GETTID = 224 - SYS_READAHEAD = 225 - SYS_SETXATTR = 226 - SYS_LSETXATTR = 227 - SYS_FSETXATTR = 228 - SYS_GETXATTR = 229 - SYS_LGETXATTR = 230 - SYS_FGETXATTR = 231 - SYS_LISTXATTR = 232 - SYS_LLISTXATTR = 233 - SYS_FLISTXATTR = 234 - SYS_REMOVEXATTR = 235 - SYS_LREMOVEXATTR = 236 - SYS_FREMOVEXATTR = 237 - SYS_TKILL = 238 - SYS_SENDFILE64 = 239 - SYS_FUTEX = 240 - SYS_SCHED_SETAFFINITY = 241 - SYS_SCHED_GETAFFINITY = 242 - SYS_SET_THREAD_AREA = 243 - SYS_GET_THREAD_AREA = 244 - SYS_IO_SETUP = 245 - SYS_IO_DESTROY = 246 - SYS_IO_GETEVENTS = 247 - SYS_IO_SUBMIT = 248 - SYS_IO_CANCEL = 249 - SYS_FADVISE64 = 250 - SYS_EXIT_GROUP = 252 - SYS_LOOKUP_DCOOKIE = 253 - SYS_EPOLL_CREATE = 254 - SYS_EPOLL_CTL = 255 - SYS_EPOLL_WAIT = 256 - SYS_REMAP_FILE_PAGES = 257 - SYS_SET_TID_ADDRESS = 258 - SYS_TIMER_CREATE = 259 - SYS_TIMER_SETTIME = 260 - SYS_TIMER_GETTIME = 261 - SYS_TIMER_GETOVERRUN = 262 - SYS_TIMER_DELETE = 263 - SYS_CLOCK_SETTIME = 264 - SYS_CLOCK_GETTIME = 265 - SYS_CLOCK_GETRES = 266 - SYS_CLOCK_NANOSLEEP = 267 - SYS_STATFS64 = 268 - SYS_FSTATFS64 = 269 - SYS_TGKILL = 270 - SYS_UTIMES = 271 - SYS_FADVISE64_64 = 272 - SYS_VSERVER = 273 - SYS_MBIND = 274 - SYS_GET_MEMPOLICY = 275 - SYS_SET_MEMPOLICY = 276 - SYS_MQ_OPEN = 277 - SYS_MQ_UNLINK = 278 - SYS_MQ_TIMEDSEND = 279 - SYS_MQ_TIMEDRECEIVE = 280 - SYS_MQ_NOTIFY = 281 - SYS_MQ_GETSETATTR = 282 - SYS_KEXEC_LOAD = 283 - SYS_WAITID = 284 - SYS_ADD_KEY = 286 - SYS_REQUEST_KEY = 287 - SYS_KEYCTL = 288 - SYS_IOPRIO_SET = 289 - SYS_IOPRIO_GET = 290 - SYS_INOTIFY_INIT = 291 - SYS_INOTIFY_ADD_WATCH = 292 - SYS_INOTIFY_RM_WATCH = 293 - SYS_MIGRATE_PAGES = 294 - SYS_OPENAT = 295 - SYS_MKDIRAT = 296 - SYS_MKNODAT = 297 - SYS_FCHOWNAT = 298 - SYS_FUTIMESAT = 299 - SYS_FSTATAT64 = 300 - SYS_UNLINKAT = 301 - SYS_RENAMEAT = 302 - SYS_LINKAT = 303 - SYS_SYMLINKAT = 304 - SYS_READLINKAT = 305 - SYS_FCHMODAT = 306 - SYS_FACCESSAT = 307 - SYS_PSELECT6 = 308 - SYS_PPOLL = 309 - SYS_UNSHARE = 310 - SYS_SET_ROBUST_LIST = 311 - SYS_GET_ROBUST_LIST = 312 - SYS_SPLICE = 313 - SYS_SYNC_FILE_RANGE = 314 - SYS_TEE = 315 - SYS_VMSPLICE = 316 - SYS_MOVE_PAGES = 317 - SYS_GETCPU = 318 - SYS_EPOLL_PWAIT = 319 - SYS_UTIMENSAT = 320 - SYS_SIGNALFD = 321 - SYS_TIMERFD_CREATE = 322 - SYS_EVENTFD = 323 - SYS_FALLOCATE = 324 - SYS_TIMERFD_SETTIME = 325 - SYS_TIMERFD_GETTIME = 326 - SYS_SIGNALFD4 = 327 - SYS_EVENTFD2 = 328 - SYS_EPOLL_CREATE1 = 329 - SYS_DUP3 = 330 - SYS_PIPE2 = 331 - SYS_INOTIFY_INIT1 = 332 
- SYS_PREADV = 333 - SYS_PWRITEV = 334 - SYS_RT_TGSIGQUEUEINFO = 335 - SYS_PERF_EVENT_OPEN = 336 - SYS_RECVMMSG = 337 - SYS_FANOTIFY_INIT = 338 - SYS_FANOTIFY_MARK = 339 - SYS_PRLIMIT64 = 340 - SYS_NAME_TO_HANDLE_AT = 341 - SYS_OPEN_BY_HANDLE_AT = 342 - SYS_CLOCK_ADJTIME = 343 - SYS_SYNCFS = 344 - SYS_SENDMMSG = 345 - SYS_SETNS = 346 - SYS_PROCESS_VM_READV = 347 - SYS_PROCESS_VM_WRITEV = 348 - SYS_KCMP = 349 - SYS_FINIT_MODULE = 350 - SYS_SCHED_SETATTR = 351 - SYS_SCHED_GETATTR = 352 - SYS_RENAMEAT2 = 353 - SYS_SECCOMP = 354 - SYS_GETRANDOM = 355 - SYS_MEMFD_CREATE = 356 - SYS_BPF = 357 - SYS_EXECVEAT = 358 - SYS_SOCKET = 359 - SYS_SOCKETPAIR = 360 - SYS_BIND = 361 - SYS_CONNECT = 362 - SYS_LISTEN = 363 - SYS_ACCEPT4 = 364 - SYS_GETSOCKOPT = 365 - SYS_SETSOCKOPT = 366 - SYS_GETSOCKNAME = 367 - SYS_GETPEERNAME = 368 - SYS_SENDTO = 369 - SYS_SENDMSG = 370 - SYS_RECVFROM = 371 - SYS_RECVMSG = 372 - SYS_SHUTDOWN = 373 - SYS_USERFAULTFD = 374 - SYS_MEMBARRIER = 375 - SYS_MLOCK2 = 376 - SYS_COPY_FILE_RANGE = 377 - SYS_PREADV2 = 378 - SYS_PWRITEV2 = 379 - SYS_PKEY_MPROTECT = 380 - SYS_PKEY_ALLOC = 381 - SYS_PKEY_FREE = 382 - SYS_STATX = 383 - SYS_ARCH_PRCTL = 384 - SYS_IO_PGETEVENTS = 385 - SYS_RSEQ = 386 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_CLOCK_GETTIME64 = 403 - SYS_CLOCK_SETTIME64 = 404 - SYS_CLOCK_ADJTIME64 = 405 - SYS_CLOCK_GETRES_TIME64 = 406 - SYS_CLOCK_NANOSLEEP_TIME64 = 407 - SYS_TIMER_GETTIME64 = 408 - SYS_TIMER_SETTIME64 = 409 - SYS_TIMERFD_GETTIME64 = 410 - SYS_TIMERFD_SETTIME64 = 411 - SYS_UTIMENSAT_TIME64 = 412 - SYS_PSELECT6_TIME64 = 413 - SYS_PPOLL_TIME64 = 414 - SYS_IO_PGETEVENTS_TIME64 = 416 - SYS_RECVMMSG_TIME64 = 417 - SYS_MQ_TIMEDSEND_TIME64 = 418 - SYS_MQ_TIMEDRECEIVE_TIME64 = 419 - SYS_SEMTIMEDOP_TIME64 = 420 - SYS_RT_SIGTIMEDWAIT_TIME64 = 421 - SYS_FUTEX_TIME64 = 422 - SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_WAITPID = 7 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_TIME = 13 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_BREAK = 17 + SYS_OLDSTAT = 18 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_UMOUNT = 22 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_STIME = 25 + SYS_PTRACE = 26 + SYS_ALARM = 27 + SYS_OLDFSTAT = 28 + SYS_PAUSE = 29 + SYS_UTIME = 30 + SYS_STTY = 31 + SYS_GTTY = 32 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_FTIME = 35 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_PROF = 44 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_SIGNAL = 48 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_LOCK = 53 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_MPX = 56 + SYS_SETPGID = 57 + SYS_ULIMIT = 58 + SYS_OLDOLDUNAME = 59 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SGETMASK = 68 + SYS_SSETMASK = 69 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRLIMIT = 76 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + 
SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SELECT = 82 + SYS_SYMLINK = 83 + SYS_OLDLSTAT = 84 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_READDIR = 89 + SYS_MMAP = 90 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_PROFIL = 98 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_IOPERM = 101 + SYS_SOCKETCALL = 102 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_OLDUNAME = 109 + SYS_IOPL = 110 + SYS_VHANGUP = 111 + SYS_IDLE = 112 + SYS_VM86OLD = 113 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_IPC = 117 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_MODIFY_LDT = 123 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_CREATE_MODULE = 127 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + SYS_GET_KERNEL_SYMS = 130 + SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_AFS_SYSCALL = 137 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_VM86 = 166 + SYS_QUERY_MODULE = 167 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_GETPMSG = 188 + SYS_PUTPMSG = 189 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_PIVOT_ROOT = 217 + SYS_MINCORE = 218 + SYS_MADVISE = 219 + SYS_GETDENTS64 = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_SET_THREAD_AREA = 243 + SYS_GET_THREAD_AREA 
= 244 + SYS_IO_SETUP = 245 + SYS_IO_DESTROY = 246 + SYS_IO_GETEVENTS = 247 + SYS_IO_SUBMIT = 248 + SYS_IO_CANCEL = 249 + SYS_FADVISE64 = 250 + SYS_EXIT_GROUP = 252 + SYS_LOOKUP_DCOOKIE = 253 + SYS_EPOLL_CREATE = 254 + SYS_EPOLL_CTL = 255 + SYS_EPOLL_WAIT = 256 + SYS_REMAP_FILE_PAGES = 257 + SYS_SET_TID_ADDRESS = 258 + SYS_TIMER_CREATE = 259 + SYS_TIMER_SETTIME = 260 + SYS_TIMER_GETTIME = 261 + SYS_TIMER_GETOVERRUN = 262 + SYS_TIMER_DELETE = 263 + SYS_CLOCK_SETTIME = 264 + SYS_CLOCK_GETTIME = 265 + SYS_CLOCK_GETRES = 266 + SYS_CLOCK_NANOSLEEP = 267 + SYS_STATFS64 = 268 + SYS_FSTATFS64 = 269 + SYS_TGKILL = 270 + SYS_UTIMES = 271 + SYS_FADVISE64_64 = 272 + SYS_VSERVER = 273 + SYS_MBIND = 274 + SYS_GET_MEMPOLICY = 275 + SYS_SET_MEMPOLICY = 276 + SYS_MQ_OPEN = 277 + SYS_MQ_UNLINK = 278 + SYS_MQ_TIMEDSEND = 279 + SYS_MQ_TIMEDRECEIVE = 280 + SYS_MQ_NOTIFY = 281 + SYS_MQ_GETSETATTR = 282 + SYS_KEXEC_LOAD = 283 + SYS_WAITID = 284 + SYS_ADD_KEY = 286 + SYS_REQUEST_KEY = 287 + SYS_KEYCTL = 288 + SYS_IOPRIO_SET = 289 + SYS_IOPRIO_GET = 290 + SYS_INOTIFY_INIT = 291 + SYS_INOTIFY_ADD_WATCH = 292 + SYS_INOTIFY_RM_WATCH = 293 + SYS_MIGRATE_PAGES = 294 + SYS_OPENAT = 295 + SYS_MKDIRAT = 296 + SYS_MKNODAT = 297 + SYS_FCHOWNAT = 298 + SYS_FUTIMESAT = 299 + SYS_FSTATAT64 = 300 + SYS_UNLINKAT = 301 + SYS_RENAMEAT = 302 + SYS_LINKAT = 303 + SYS_SYMLINKAT = 304 + SYS_READLINKAT = 305 + SYS_FCHMODAT = 306 + SYS_FACCESSAT = 307 + SYS_PSELECT6 = 308 + SYS_PPOLL = 309 + SYS_UNSHARE = 310 + SYS_SET_ROBUST_LIST = 311 + SYS_GET_ROBUST_LIST = 312 + SYS_SPLICE = 313 + SYS_SYNC_FILE_RANGE = 314 + SYS_TEE = 315 + SYS_VMSPLICE = 316 + SYS_MOVE_PAGES = 317 + SYS_GETCPU = 318 + SYS_EPOLL_PWAIT = 319 + SYS_UTIMENSAT = 320 + SYS_SIGNALFD = 321 + SYS_TIMERFD_CREATE = 322 + SYS_EVENTFD = 323 + SYS_FALLOCATE = 324 + SYS_TIMERFD_SETTIME = 325 + SYS_TIMERFD_GETTIME = 326 + SYS_SIGNALFD4 = 327 + SYS_EVENTFD2 = 328 + SYS_EPOLL_CREATE1 = 329 + SYS_DUP3 = 330 + SYS_PIPE2 = 331 + SYS_INOTIFY_INIT1 = 332 + SYS_PREADV = 333 + SYS_PWRITEV = 334 + SYS_RT_TGSIGQUEUEINFO = 335 + SYS_PERF_EVENT_OPEN = 336 + SYS_RECVMMSG = 337 + SYS_FANOTIFY_INIT = 338 + SYS_FANOTIFY_MARK = 339 + SYS_PRLIMIT64 = 340 + SYS_NAME_TO_HANDLE_AT = 341 + SYS_OPEN_BY_HANDLE_AT = 342 + SYS_CLOCK_ADJTIME = 343 + SYS_SYNCFS = 344 + SYS_SENDMMSG = 345 + SYS_SETNS = 346 + SYS_PROCESS_VM_READV = 347 + SYS_PROCESS_VM_WRITEV = 348 + SYS_KCMP = 349 + SYS_FINIT_MODULE = 350 + SYS_SCHED_SETATTR = 351 + SYS_SCHED_GETATTR = 352 + SYS_RENAMEAT2 = 353 + SYS_SECCOMP = 354 + SYS_GETRANDOM = 355 + SYS_MEMFD_CREATE = 356 + SYS_BPF = 357 + SYS_EXECVEAT = 358 + SYS_SOCKET = 359 + SYS_SOCKETPAIR = 360 + SYS_BIND = 361 + SYS_CONNECT = 362 + SYS_LISTEN = 363 + SYS_ACCEPT4 = 364 + SYS_GETSOCKOPT = 365 + SYS_SETSOCKOPT = 366 + SYS_GETSOCKNAME = 367 + SYS_GETPEERNAME = 368 + SYS_SENDTO = 369 + SYS_SENDMSG = 370 + SYS_RECVFROM = 371 + SYS_RECVMSG = 372 + SYS_SHUTDOWN = 373 + SYS_USERFAULTFD = 374 + SYS_MEMBARRIER = 375 + SYS_MLOCK2 = 376 + SYS_COPY_FILE_RANGE = 377 + SYS_PREADV2 = 378 + SYS_PWRITEV2 = 379 + SYS_PKEY_MPROTECT = 380 + SYS_PKEY_ALLOC = 381 + SYS_PKEY_FREE = 382 + SYS_STATX = 383 + SYS_ARCH_PRCTL = 384 + SYS_IO_PGETEVENTS = 385 + SYS_RSEQ = 386 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 9ba2078476..b3d8ad79d4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,8 +341,4 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 
SYS_RSEQ = 334 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 94f68f101b..e092822fba 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -6,385 +6,359 @@ package unix const ( - SYS_RESTART_SYSCALL = 0 - SYS_EXIT = 1 - SYS_FORK = 2 - SYS_READ = 3 - SYS_WRITE = 4 - SYS_OPEN = 5 - SYS_CLOSE = 6 - SYS_CREAT = 8 - SYS_LINK = 9 - SYS_UNLINK = 10 - SYS_EXECVE = 11 - SYS_CHDIR = 12 - SYS_MKNOD = 14 - SYS_CHMOD = 15 - SYS_LCHOWN = 16 - SYS_LSEEK = 19 - SYS_GETPID = 20 - SYS_MOUNT = 21 - SYS_SETUID = 23 - SYS_GETUID = 24 - SYS_PTRACE = 26 - SYS_PAUSE = 29 - SYS_ACCESS = 33 - SYS_NICE = 34 - SYS_SYNC = 36 - SYS_KILL = 37 - SYS_RENAME = 38 - SYS_MKDIR = 39 - SYS_RMDIR = 40 - SYS_DUP = 41 - SYS_PIPE = 42 - SYS_TIMES = 43 - SYS_BRK = 45 - SYS_SETGID = 46 - SYS_GETGID = 47 - SYS_GETEUID = 49 - SYS_GETEGID = 50 - SYS_ACCT = 51 - SYS_UMOUNT2 = 52 - SYS_IOCTL = 54 - SYS_FCNTL = 55 - SYS_SETPGID = 57 - SYS_UMASK = 60 - SYS_CHROOT = 61 - SYS_USTAT = 62 - SYS_DUP2 = 63 - SYS_GETPPID = 64 - SYS_GETPGRP = 65 - SYS_SETSID = 66 - SYS_SIGACTION = 67 - SYS_SETREUID = 70 - SYS_SETREGID = 71 - SYS_SIGSUSPEND = 72 - SYS_SIGPENDING = 73 - SYS_SETHOSTNAME = 74 - SYS_SETRLIMIT = 75 - SYS_GETRUSAGE = 77 - SYS_GETTIMEOFDAY = 78 - SYS_SETTIMEOFDAY = 79 - SYS_GETGROUPS = 80 - SYS_SETGROUPS = 81 - SYS_SYMLINK = 83 - SYS_READLINK = 85 - SYS_USELIB = 86 - SYS_SWAPON = 87 - SYS_REBOOT = 88 - SYS_MUNMAP = 91 - SYS_TRUNCATE = 92 - SYS_FTRUNCATE = 93 - SYS_FCHMOD = 94 - SYS_FCHOWN = 95 - SYS_GETPRIORITY = 96 - SYS_SETPRIORITY = 97 - SYS_STATFS = 99 - SYS_FSTATFS = 100 - SYS_SYSLOG = 103 - SYS_SETITIMER = 104 - SYS_GETITIMER = 105 - SYS_STAT = 106 - SYS_LSTAT = 107 - SYS_FSTAT = 108 - SYS_VHANGUP = 111 - SYS_WAIT4 = 114 - SYS_SWAPOFF = 115 - SYS_SYSINFO = 116 - SYS_FSYNC = 118 - SYS_SIGRETURN = 119 - SYS_CLONE = 120 - SYS_SETDOMAINNAME = 121 - SYS_UNAME = 122 - SYS_ADJTIMEX = 124 - SYS_MPROTECT = 125 - SYS_SIGPROCMASK = 126 - SYS_INIT_MODULE = 128 - SYS_DELETE_MODULE = 129 - SYS_QUOTACTL = 131 - SYS_GETPGID = 132 - SYS_FCHDIR = 133 - SYS_BDFLUSH = 134 - SYS_SYSFS = 135 - SYS_PERSONALITY = 136 - SYS_SETFSUID = 138 - SYS_SETFSGID = 139 - SYS__LLSEEK = 140 - SYS_GETDENTS = 141 - SYS__NEWSELECT = 142 - SYS_FLOCK = 143 - SYS_MSYNC = 144 - SYS_READV = 145 - SYS_WRITEV = 146 - SYS_GETSID = 147 - SYS_FDATASYNC = 148 - SYS__SYSCTL = 149 - SYS_MLOCK = 150 - SYS_MUNLOCK = 151 - SYS_MLOCKALL = 152 - SYS_MUNLOCKALL = 153 - SYS_SCHED_SETPARAM = 154 - SYS_SCHED_GETPARAM = 155 - SYS_SCHED_SETSCHEDULER = 156 - SYS_SCHED_GETSCHEDULER = 157 - SYS_SCHED_YIELD = 158 - SYS_SCHED_GET_PRIORITY_MAX = 159 - SYS_SCHED_GET_PRIORITY_MIN = 160 - SYS_SCHED_RR_GET_INTERVAL = 161 - SYS_NANOSLEEP = 162 - SYS_MREMAP = 163 - SYS_SETRESUID = 164 - SYS_GETRESUID = 165 - SYS_POLL = 168 - SYS_NFSSERVCTL = 169 - SYS_SETRESGID = 170 - SYS_GETRESGID = 171 - SYS_PRCTL = 172 - SYS_RT_SIGRETURN = 173 - SYS_RT_SIGACTION = 174 - SYS_RT_SIGPROCMASK = 175 - SYS_RT_SIGPENDING = 176 - SYS_RT_SIGTIMEDWAIT = 177 - SYS_RT_SIGQUEUEINFO = 178 - SYS_RT_SIGSUSPEND = 179 - SYS_PREAD64 = 180 - SYS_PWRITE64 = 181 - SYS_CHOWN = 182 - SYS_GETCWD = 183 - SYS_CAPGET = 184 - SYS_CAPSET = 185 - SYS_SIGALTSTACK = 186 - SYS_SENDFILE = 187 - SYS_VFORK = 190 - SYS_UGETRLIMIT = 191 - SYS_MMAP2 = 192 - SYS_TRUNCATE64 = 193 - SYS_FTRUNCATE64 = 194 - 
SYS_STAT64 = 195 - SYS_LSTAT64 = 196 - SYS_FSTAT64 = 197 - SYS_LCHOWN32 = 198 - SYS_GETUID32 = 199 - SYS_GETGID32 = 200 - SYS_GETEUID32 = 201 - SYS_GETEGID32 = 202 - SYS_SETREUID32 = 203 - SYS_SETREGID32 = 204 - SYS_GETGROUPS32 = 205 - SYS_SETGROUPS32 = 206 - SYS_FCHOWN32 = 207 - SYS_SETRESUID32 = 208 - SYS_GETRESUID32 = 209 - SYS_SETRESGID32 = 210 - SYS_GETRESGID32 = 211 - SYS_CHOWN32 = 212 - SYS_SETUID32 = 213 - SYS_SETGID32 = 214 - SYS_SETFSUID32 = 215 - SYS_SETFSGID32 = 216 - SYS_GETDENTS64 = 217 - SYS_PIVOT_ROOT = 218 - SYS_MINCORE = 219 - SYS_MADVISE = 220 - SYS_FCNTL64 = 221 - SYS_GETTID = 224 - SYS_READAHEAD = 225 - SYS_SETXATTR = 226 - SYS_LSETXATTR = 227 - SYS_FSETXATTR = 228 - SYS_GETXATTR = 229 - SYS_LGETXATTR = 230 - SYS_FGETXATTR = 231 - SYS_LISTXATTR = 232 - SYS_LLISTXATTR = 233 - SYS_FLISTXATTR = 234 - SYS_REMOVEXATTR = 235 - SYS_LREMOVEXATTR = 236 - SYS_FREMOVEXATTR = 237 - SYS_TKILL = 238 - SYS_SENDFILE64 = 239 - SYS_FUTEX = 240 - SYS_SCHED_SETAFFINITY = 241 - SYS_SCHED_GETAFFINITY = 242 - SYS_IO_SETUP = 243 - SYS_IO_DESTROY = 244 - SYS_IO_GETEVENTS = 245 - SYS_IO_SUBMIT = 246 - SYS_IO_CANCEL = 247 - SYS_EXIT_GROUP = 248 - SYS_LOOKUP_DCOOKIE = 249 - SYS_EPOLL_CREATE = 250 - SYS_EPOLL_CTL = 251 - SYS_EPOLL_WAIT = 252 - SYS_REMAP_FILE_PAGES = 253 - SYS_SET_TID_ADDRESS = 256 - SYS_TIMER_CREATE = 257 - SYS_TIMER_SETTIME = 258 - SYS_TIMER_GETTIME = 259 - SYS_TIMER_GETOVERRUN = 260 - SYS_TIMER_DELETE = 261 - SYS_CLOCK_SETTIME = 262 - SYS_CLOCK_GETTIME = 263 - SYS_CLOCK_GETRES = 264 - SYS_CLOCK_NANOSLEEP = 265 - SYS_STATFS64 = 266 - SYS_FSTATFS64 = 267 - SYS_TGKILL = 268 - SYS_UTIMES = 269 - SYS_ARM_FADVISE64_64 = 270 - SYS_PCICONFIG_IOBASE = 271 - SYS_PCICONFIG_READ = 272 - SYS_PCICONFIG_WRITE = 273 - SYS_MQ_OPEN = 274 - SYS_MQ_UNLINK = 275 - SYS_MQ_TIMEDSEND = 276 - SYS_MQ_TIMEDRECEIVE = 277 - SYS_MQ_NOTIFY = 278 - SYS_MQ_GETSETATTR = 279 - SYS_WAITID = 280 - SYS_SOCKET = 281 - SYS_BIND = 282 - SYS_CONNECT = 283 - SYS_LISTEN = 284 - SYS_ACCEPT = 285 - SYS_GETSOCKNAME = 286 - SYS_GETPEERNAME = 287 - SYS_SOCKETPAIR = 288 - SYS_SEND = 289 - SYS_SENDTO = 290 - SYS_RECV = 291 - SYS_RECVFROM = 292 - SYS_SHUTDOWN = 293 - SYS_SETSOCKOPT = 294 - SYS_GETSOCKOPT = 295 - SYS_SENDMSG = 296 - SYS_RECVMSG = 297 - SYS_SEMOP = 298 - SYS_SEMGET = 299 - SYS_SEMCTL = 300 - SYS_MSGSND = 301 - SYS_MSGRCV = 302 - SYS_MSGGET = 303 - SYS_MSGCTL = 304 - SYS_SHMAT = 305 - SYS_SHMDT = 306 - SYS_SHMGET = 307 - SYS_SHMCTL = 308 - SYS_ADD_KEY = 309 - SYS_REQUEST_KEY = 310 - SYS_KEYCTL = 311 - SYS_SEMTIMEDOP = 312 - SYS_VSERVER = 313 - SYS_IOPRIO_SET = 314 - SYS_IOPRIO_GET = 315 - SYS_INOTIFY_INIT = 316 - SYS_INOTIFY_ADD_WATCH = 317 - SYS_INOTIFY_RM_WATCH = 318 - SYS_MBIND = 319 - SYS_GET_MEMPOLICY = 320 - SYS_SET_MEMPOLICY = 321 - SYS_OPENAT = 322 - SYS_MKDIRAT = 323 - SYS_MKNODAT = 324 - SYS_FCHOWNAT = 325 - SYS_FUTIMESAT = 326 - SYS_FSTATAT64 = 327 - SYS_UNLINKAT = 328 - SYS_RENAMEAT = 329 - SYS_LINKAT = 330 - SYS_SYMLINKAT = 331 - SYS_READLINKAT = 332 - SYS_FCHMODAT = 333 - SYS_FACCESSAT = 334 - SYS_PSELECT6 = 335 - SYS_PPOLL = 336 - SYS_UNSHARE = 337 - SYS_SET_ROBUST_LIST = 338 - SYS_GET_ROBUST_LIST = 339 - SYS_SPLICE = 340 - SYS_ARM_SYNC_FILE_RANGE = 341 - SYS_TEE = 342 - SYS_VMSPLICE = 343 - SYS_MOVE_PAGES = 344 - SYS_GETCPU = 345 - SYS_EPOLL_PWAIT = 346 - SYS_KEXEC_LOAD = 347 - SYS_UTIMENSAT = 348 - SYS_SIGNALFD = 349 - SYS_TIMERFD_CREATE = 350 - SYS_EVENTFD = 351 - SYS_FALLOCATE = 352 - SYS_TIMERFD_SETTIME = 353 - SYS_TIMERFD_GETTIME = 354 - SYS_SIGNALFD4 = 355 - SYS_EVENTFD2 = 356 - 
SYS_EPOLL_CREATE1 = 357 - SYS_DUP3 = 358 - SYS_PIPE2 = 359 - SYS_INOTIFY_INIT1 = 360 - SYS_PREADV = 361 - SYS_PWRITEV = 362 - SYS_RT_TGSIGQUEUEINFO = 363 - SYS_PERF_EVENT_OPEN = 364 - SYS_RECVMMSG = 365 - SYS_ACCEPT4 = 366 - SYS_FANOTIFY_INIT = 367 - SYS_FANOTIFY_MARK = 368 - SYS_PRLIMIT64 = 369 - SYS_NAME_TO_HANDLE_AT = 370 - SYS_OPEN_BY_HANDLE_AT = 371 - SYS_CLOCK_ADJTIME = 372 - SYS_SYNCFS = 373 - SYS_SENDMMSG = 374 - SYS_SETNS = 375 - SYS_PROCESS_VM_READV = 376 - SYS_PROCESS_VM_WRITEV = 377 - SYS_KCMP = 378 - SYS_FINIT_MODULE = 379 - SYS_SCHED_SETATTR = 380 - SYS_SCHED_GETATTR = 381 - SYS_RENAMEAT2 = 382 - SYS_SECCOMP = 383 - SYS_GETRANDOM = 384 - SYS_MEMFD_CREATE = 385 - SYS_BPF = 386 - SYS_EXECVEAT = 387 - SYS_USERFAULTFD = 388 - SYS_MEMBARRIER = 389 - SYS_MLOCK2 = 390 - SYS_COPY_FILE_RANGE = 391 - SYS_PREADV2 = 392 - SYS_PWRITEV2 = 393 - SYS_PKEY_MPROTECT = 394 - SYS_PKEY_ALLOC = 395 - SYS_PKEY_FREE = 396 - SYS_STATX = 397 - SYS_RSEQ = 398 - SYS_IO_PGETEVENTS = 399 - SYS_MIGRATE_PAGES = 400 - SYS_KEXEC_FILE_LOAD = 401 - SYS_CLOCK_GETTIME64 = 403 - SYS_CLOCK_SETTIME64 = 404 - SYS_CLOCK_ADJTIME64 = 405 - SYS_CLOCK_GETRES_TIME64 = 406 - SYS_CLOCK_NANOSLEEP_TIME64 = 407 - SYS_TIMER_GETTIME64 = 408 - SYS_TIMER_SETTIME64 = 409 - SYS_TIMERFD_GETTIME64 = 410 - SYS_TIMERFD_SETTIME64 = 411 - SYS_UTIMENSAT_TIME64 = 412 - SYS_PSELECT6_TIME64 = 413 - SYS_PPOLL_TIME64 = 414 - SYS_IO_PGETEVENTS_TIME64 = 416 - SYS_RECVMMSG_TIME64 = 417 - SYS_MQ_TIMEDSEND_TIME64 = 418 - SYS_MQ_TIMEDRECEIVE_TIME64 = 419 - SYS_SEMTIMEDOP_TIME64 = 420 - SYS_RT_SIGTIMEDWAIT_TIME64 = 421 - SYS_FUTEX_TIME64 = 422 - SYS_SCHED_RR_GET_INTERVAL_TIME64 = 423 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 + SYS_RESTART_SYSCALL = 0 + SYS_EXIT = 1 + SYS_FORK = 2 + SYS_READ = 3 + SYS_WRITE = 4 + SYS_OPEN = 5 + SYS_CLOSE = 6 + SYS_CREAT = 8 + SYS_LINK = 9 + SYS_UNLINK = 10 + SYS_EXECVE = 11 + SYS_CHDIR = 12 + SYS_MKNOD = 14 + SYS_CHMOD = 15 + SYS_LCHOWN = 16 + SYS_LSEEK = 19 + SYS_GETPID = 20 + SYS_MOUNT = 21 + SYS_SETUID = 23 + SYS_GETUID = 24 + SYS_PTRACE = 26 + SYS_PAUSE = 29 + SYS_ACCESS = 33 + SYS_NICE = 34 + SYS_SYNC = 36 + SYS_KILL = 37 + SYS_RENAME = 38 + SYS_MKDIR = 39 + SYS_RMDIR = 40 + SYS_DUP = 41 + SYS_PIPE = 42 + SYS_TIMES = 43 + SYS_BRK = 45 + SYS_SETGID = 46 + SYS_GETGID = 47 + SYS_GETEUID = 49 + SYS_GETEGID = 50 + SYS_ACCT = 51 + SYS_UMOUNT2 = 52 + SYS_IOCTL = 54 + SYS_FCNTL = 55 + SYS_SETPGID = 57 + SYS_UMASK = 60 + SYS_CHROOT = 61 + SYS_USTAT = 62 + SYS_DUP2 = 63 + SYS_GETPPID = 64 + SYS_GETPGRP = 65 + SYS_SETSID = 66 + SYS_SIGACTION = 67 + SYS_SETREUID = 70 + SYS_SETREGID = 71 + SYS_SIGSUSPEND = 72 + SYS_SIGPENDING = 73 + SYS_SETHOSTNAME = 74 + SYS_SETRLIMIT = 75 + SYS_GETRUSAGE = 77 + SYS_GETTIMEOFDAY = 78 + SYS_SETTIMEOFDAY = 79 + SYS_GETGROUPS = 80 + SYS_SETGROUPS = 81 + SYS_SYMLINK = 83 + SYS_READLINK = 85 + SYS_USELIB = 86 + SYS_SWAPON = 87 + SYS_REBOOT = 88 + SYS_MUNMAP = 91 + SYS_TRUNCATE = 92 + SYS_FTRUNCATE = 93 + SYS_FCHMOD = 94 + SYS_FCHOWN = 95 + SYS_GETPRIORITY = 96 + SYS_SETPRIORITY = 97 + SYS_STATFS = 99 + SYS_FSTATFS = 100 + SYS_SYSLOG = 103 + SYS_SETITIMER = 104 + SYS_GETITIMER = 105 + SYS_STAT = 106 + SYS_LSTAT = 107 + SYS_FSTAT = 108 + SYS_VHANGUP = 111 + SYS_WAIT4 = 114 + SYS_SWAPOFF = 115 + SYS_SYSINFO = 116 + SYS_FSYNC = 118 + SYS_SIGRETURN = 119 + SYS_CLONE = 120 + SYS_SETDOMAINNAME = 121 + SYS_UNAME = 122 + SYS_ADJTIMEX = 124 + SYS_MPROTECT = 125 + SYS_SIGPROCMASK = 126 + SYS_INIT_MODULE = 128 + SYS_DELETE_MODULE = 129 + 
SYS_QUOTACTL = 131 + SYS_GETPGID = 132 + SYS_FCHDIR = 133 + SYS_BDFLUSH = 134 + SYS_SYSFS = 135 + SYS_PERSONALITY = 136 + SYS_SETFSUID = 138 + SYS_SETFSGID = 139 + SYS__LLSEEK = 140 + SYS_GETDENTS = 141 + SYS__NEWSELECT = 142 + SYS_FLOCK = 143 + SYS_MSYNC = 144 + SYS_READV = 145 + SYS_WRITEV = 146 + SYS_GETSID = 147 + SYS_FDATASYNC = 148 + SYS__SYSCTL = 149 + SYS_MLOCK = 150 + SYS_MUNLOCK = 151 + SYS_MLOCKALL = 152 + SYS_MUNLOCKALL = 153 + SYS_SCHED_SETPARAM = 154 + SYS_SCHED_GETPARAM = 155 + SYS_SCHED_SETSCHEDULER = 156 + SYS_SCHED_GETSCHEDULER = 157 + SYS_SCHED_YIELD = 158 + SYS_SCHED_GET_PRIORITY_MAX = 159 + SYS_SCHED_GET_PRIORITY_MIN = 160 + SYS_SCHED_RR_GET_INTERVAL = 161 + SYS_NANOSLEEP = 162 + SYS_MREMAP = 163 + SYS_SETRESUID = 164 + SYS_GETRESUID = 165 + SYS_POLL = 168 + SYS_NFSSERVCTL = 169 + SYS_SETRESGID = 170 + SYS_GETRESGID = 171 + SYS_PRCTL = 172 + SYS_RT_SIGRETURN = 173 + SYS_RT_SIGACTION = 174 + SYS_RT_SIGPROCMASK = 175 + SYS_RT_SIGPENDING = 176 + SYS_RT_SIGTIMEDWAIT = 177 + SYS_RT_SIGQUEUEINFO = 178 + SYS_RT_SIGSUSPEND = 179 + SYS_PREAD64 = 180 + SYS_PWRITE64 = 181 + SYS_CHOWN = 182 + SYS_GETCWD = 183 + SYS_CAPGET = 184 + SYS_CAPSET = 185 + SYS_SIGALTSTACK = 186 + SYS_SENDFILE = 187 + SYS_VFORK = 190 + SYS_UGETRLIMIT = 191 + SYS_MMAP2 = 192 + SYS_TRUNCATE64 = 193 + SYS_FTRUNCATE64 = 194 + SYS_STAT64 = 195 + SYS_LSTAT64 = 196 + SYS_FSTAT64 = 197 + SYS_LCHOWN32 = 198 + SYS_GETUID32 = 199 + SYS_GETGID32 = 200 + SYS_GETEUID32 = 201 + SYS_GETEGID32 = 202 + SYS_SETREUID32 = 203 + SYS_SETREGID32 = 204 + SYS_GETGROUPS32 = 205 + SYS_SETGROUPS32 = 206 + SYS_FCHOWN32 = 207 + SYS_SETRESUID32 = 208 + SYS_GETRESUID32 = 209 + SYS_SETRESGID32 = 210 + SYS_GETRESGID32 = 211 + SYS_CHOWN32 = 212 + SYS_SETUID32 = 213 + SYS_SETGID32 = 214 + SYS_SETFSUID32 = 215 + SYS_SETFSGID32 = 216 + SYS_GETDENTS64 = 217 + SYS_PIVOT_ROOT = 218 + SYS_MINCORE = 219 + SYS_MADVISE = 220 + SYS_FCNTL64 = 221 + SYS_GETTID = 224 + SYS_READAHEAD = 225 + SYS_SETXATTR = 226 + SYS_LSETXATTR = 227 + SYS_FSETXATTR = 228 + SYS_GETXATTR = 229 + SYS_LGETXATTR = 230 + SYS_FGETXATTR = 231 + SYS_LISTXATTR = 232 + SYS_LLISTXATTR = 233 + SYS_FLISTXATTR = 234 + SYS_REMOVEXATTR = 235 + SYS_LREMOVEXATTR = 236 + SYS_FREMOVEXATTR = 237 + SYS_TKILL = 238 + SYS_SENDFILE64 = 239 + SYS_FUTEX = 240 + SYS_SCHED_SETAFFINITY = 241 + SYS_SCHED_GETAFFINITY = 242 + SYS_IO_SETUP = 243 + SYS_IO_DESTROY = 244 + SYS_IO_GETEVENTS = 245 + SYS_IO_SUBMIT = 246 + SYS_IO_CANCEL = 247 + SYS_EXIT_GROUP = 248 + SYS_LOOKUP_DCOOKIE = 249 + SYS_EPOLL_CREATE = 250 + SYS_EPOLL_CTL = 251 + SYS_EPOLL_WAIT = 252 + SYS_REMAP_FILE_PAGES = 253 + SYS_SET_TID_ADDRESS = 256 + SYS_TIMER_CREATE = 257 + SYS_TIMER_SETTIME = 258 + SYS_TIMER_GETTIME = 259 + SYS_TIMER_GETOVERRUN = 260 + SYS_TIMER_DELETE = 261 + SYS_CLOCK_SETTIME = 262 + SYS_CLOCK_GETTIME = 263 + SYS_CLOCK_GETRES = 264 + SYS_CLOCK_NANOSLEEP = 265 + SYS_STATFS64 = 266 + SYS_FSTATFS64 = 267 + SYS_TGKILL = 268 + SYS_UTIMES = 269 + SYS_ARM_FADVISE64_64 = 270 + SYS_PCICONFIG_IOBASE = 271 + SYS_PCICONFIG_READ = 272 + SYS_PCICONFIG_WRITE = 273 + SYS_MQ_OPEN = 274 + SYS_MQ_UNLINK = 275 + SYS_MQ_TIMEDSEND = 276 + SYS_MQ_TIMEDRECEIVE = 277 + SYS_MQ_NOTIFY = 278 + SYS_MQ_GETSETATTR = 279 + SYS_WAITID = 280 + SYS_SOCKET = 281 + SYS_BIND = 282 + SYS_CONNECT = 283 + SYS_LISTEN = 284 + SYS_ACCEPT = 285 + SYS_GETSOCKNAME = 286 + SYS_GETPEERNAME = 287 + SYS_SOCKETPAIR = 288 + SYS_SEND = 289 + SYS_SENDTO = 290 + SYS_RECV = 291 + SYS_RECVFROM = 292 + SYS_SHUTDOWN = 293 + SYS_SETSOCKOPT = 294 + SYS_GETSOCKOPT = 295 + SYS_SENDMSG = 
296 + SYS_RECVMSG = 297 + SYS_SEMOP = 298 + SYS_SEMGET = 299 + SYS_SEMCTL = 300 + SYS_MSGSND = 301 + SYS_MSGRCV = 302 + SYS_MSGGET = 303 + SYS_MSGCTL = 304 + SYS_SHMAT = 305 + SYS_SHMDT = 306 + SYS_SHMGET = 307 + SYS_SHMCTL = 308 + SYS_ADD_KEY = 309 + SYS_REQUEST_KEY = 310 + SYS_KEYCTL = 311 + SYS_SEMTIMEDOP = 312 + SYS_VSERVER = 313 + SYS_IOPRIO_SET = 314 + SYS_IOPRIO_GET = 315 + SYS_INOTIFY_INIT = 316 + SYS_INOTIFY_ADD_WATCH = 317 + SYS_INOTIFY_RM_WATCH = 318 + SYS_MBIND = 319 + SYS_GET_MEMPOLICY = 320 + SYS_SET_MEMPOLICY = 321 + SYS_OPENAT = 322 + SYS_MKDIRAT = 323 + SYS_MKNODAT = 324 + SYS_FCHOWNAT = 325 + SYS_FUTIMESAT = 326 + SYS_FSTATAT64 = 327 + SYS_UNLINKAT = 328 + SYS_RENAMEAT = 329 + SYS_LINKAT = 330 + SYS_SYMLINKAT = 331 + SYS_READLINKAT = 332 + SYS_FCHMODAT = 333 + SYS_FACCESSAT = 334 + SYS_PSELECT6 = 335 + SYS_PPOLL = 336 + SYS_UNSHARE = 337 + SYS_SET_ROBUST_LIST = 338 + SYS_GET_ROBUST_LIST = 339 + SYS_SPLICE = 340 + SYS_ARM_SYNC_FILE_RANGE = 341 + SYS_TEE = 342 + SYS_VMSPLICE = 343 + SYS_MOVE_PAGES = 344 + SYS_GETCPU = 345 + SYS_EPOLL_PWAIT = 346 + SYS_KEXEC_LOAD = 347 + SYS_UTIMENSAT = 348 + SYS_SIGNALFD = 349 + SYS_TIMERFD_CREATE = 350 + SYS_EVENTFD = 351 + SYS_FALLOCATE = 352 + SYS_TIMERFD_SETTIME = 353 + SYS_TIMERFD_GETTIME = 354 + SYS_SIGNALFD4 = 355 + SYS_EVENTFD2 = 356 + SYS_EPOLL_CREATE1 = 357 + SYS_DUP3 = 358 + SYS_PIPE2 = 359 + SYS_INOTIFY_INIT1 = 360 + SYS_PREADV = 361 + SYS_PWRITEV = 362 + SYS_RT_TGSIGQUEUEINFO = 363 + SYS_PERF_EVENT_OPEN = 364 + SYS_RECVMMSG = 365 + SYS_ACCEPT4 = 366 + SYS_FANOTIFY_INIT = 367 + SYS_FANOTIFY_MARK = 368 + SYS_PRLIMIT64 = 369 + SYS_NAME_TO_HANDLE_AT = 370 + SYS_OPEN_BY_HANDLE_AT = 371 + SYS_CLOCK_ADJTIME = 372 + SYS_SYNCFS = 373 + SYS_SENDMMSG = 374 + SYS_SETNS = 375 + SYS_PROCESS_VM_READV = 376 + SYS_PROCESS_VM_WRITEV = 377 + SYS_KCMP = 378 + SYS_FINIT_MODULE = 379 + SYS_SCHED_SETATTR = 380 + SYS_SCHED_GETATTR = 381 + SYS_RENAMEAT2 = 382 + SYS_SECCOMP = 383 + SYS_GETRANDOM = 384 + SYS_MEMFD_CREATE = 385 + SYS_BPF = 386 + SYS_EXECVEAT = 387 + SYS_USERFAULTFD = 388 + SYS_MEMBARRIER = 389 + SYS_MLOCK2 = 390 + SYS_COPY_FILE_RANGE = 391 + SYS_PREADV2 = 392 + SYS_PWRITEV2 = 393 + SYS_PKEY_MPROTECT = 394 + SYS_PKEY_ALLOC = 395 + SYS_PKEY_FREE = 396 + SYS_STATX = 397 + SYS_RSEQ = 398 + SYS_IO_PGETEVENTS = 399 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 15c413516e..b81d508a73 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -286,8 +286,4 @@ const ( SYS_IO_PGETEVENTS = 292 SYS_RSEQ = 293 SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 638465b142..6893a5bd05 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -6,406 +6,372 @@ package unix const ( - SYS_SYSCALL = 4000 - SYS_EXIT = 4001 - SYS_FORK = 4002 - SYS_READ = 4003 - SYS_WRITE = 4004 - SYS_OPEN = 4005 - SYS_CLOSE = 4006 - SYS_WAITPID = 4007 - SYS_CREAT = 4008 - SYS_LINK = 4009 - SYS_UNLINK = 4010 - SYS_EXECVE = 4011 - SYS_CHDIR = 4012 - SYS_TIME = 4013 - SYS_MKNOD = 4014 - SYS_CHMOD = 4015 - SYS_LCHOWN = 4016 - SYS_BREAK = 4017 - SYS_UNUSED18 = 4018 - SYS_LSEEK = 4019 - SYS_GETPID = 4020 - SYS_MOUNT = 4021 - SYS_UMOUNT = 4022 - SYS_SETUID = 4023 
- SYS_GETUID = 4024 - SYS_STIME = 4025 - SYS_PTRACE = 4026 - SYS_ALARM = 4027 - SYS_UNUSED28 = 4028 - SYS_PAUSE = 4029 - SYS_UTIME = 4030 - SYS_STTY = 4031 - SYS_GTTY = 4032 - SYS_ACCESS = 4033 - SYS_NICE = 4034 - SYS_FTIME = 4035 - SYS_SYNC = 4036 - SYS_KILL = 4037 - SYS_RENAME = 4038 - SYS_MKDIR = 4039 - SYS_RMDIR = 4040 - SYS_DUP = 4041 - SYS_PIPE = 4042 - SYS_TIMES = 4043 - SYS_PROF = 4044 - SYS_BRK = 4045 - SYS_SETGID = 4046 - SYS_GETGID = 4047 - SYS_SIGNAL = 4048 - SYS_GETEUID = 4049 - SYS_GETEGID = 4050 - SYS_ACCT = 4051 - SYS_UMOUNT2 = 4052 - SYS_LOCK = 4053 - SYS_IOCTL = 4054 - SYS_FCNTL = 4055 - SYS_MPX = 4056 - SYS_SETPGID = 4057 - SYS_ULIMIT = 4058 - SYS_UNUSED59 = 4059 - SYS_UMASK = 4060 - SYS_CHROOT = 4061 - SYS_USTAT = 4062 - SYS_DUP2 = 4063 - SYS_GETPPID = 4064 - SYS_GETPGRP = 4065 - SYS_SETSID = 4066 - SYS_SIGACTION = 4067 - SYS_SGETMASK = 4068 - SYS_SSETMASK = 4069 - SYS_SETREUID = 4070 - SYS_SETREGID = 4071 - SYS_SIGSUSPEND = 4072 - SYS_SIGPENDING = 4073 - SYS_SETHOSTNAME = 4074 - SYS_SETRLIMIT = 4075 - SYS_GETRLIMIT = 4076 - SYS_GETRUSAGE = 4077 - SYS_GETTIMEOFDAY = 4078 - SYS_SETTIMEOFDAY = 4079 - SYS_GETGROUPS = 4080 - SYS_SETGROUPS = 4081 - SYS_RESERVED82 = 4082 - SYS_SYMLINK = 4083 - SYS_UNUSED84 = 4084 - SYS_READLINK = 4085 - SYS_USELIB = 4086 - SYS_SWAPON = 4087 - SYS_REBOOT = 4088 - SYS_READDIR = 4089 - SYS_MMAP = 4090 - SYS_MUNMAP = 4091 - SYS_TRUNCATE = 4092 - SYS_FTRUNCATE = 4093 - SYS_FCHMOD = 4094 - SYS_FCHOWN = 4095 - SYS_GETPRIORITY = 4096 - SYS_SETPRIORITY = 4097 - SYS_PROFIL = 4098 - SYS_STATFS = 4099 - SYS_FSTATFS = 4100 - SYS_IOPERM = 4101 - SYS_SOCKETCALL = 4102 - SYS_SYSLOG = 4103 - SYS_SETITIMER = 4104 - SYS_GETITIMER = 4105 - SYS_STAT = 4106 - SYS_LSTAT = 4107 - SYS_FSTAT = 4108 - SYS_UNUSED109 = 4109 - SYS_IOPL = 4110 - SYS_VHANGUP = 4111 - SYS_IDLE = 4112 - SYS_VM86 = 4113 - SYS_WAIT4 = 4114 - SYS_SWAPOFF = 4115 - SYS_SYSINFO = 4116 - SYS_IPC = 4117 - SYS_FSYNC = 4118 - SYS_SIGRETURN = 4119 - SYS_CLONE = 4120 - SYS_SETDOMAINNAME = 4121 - SYS_UNAME = 4122 - SYS_MODIFY_LDT = 4123 - SYS_ADJTIMEX = 4124 - SYS_MPROTECT = 4125 - SYS_SIGPROCMASK = 4126 - SYS_CREATE_MODULE = 4127 - SYS_INIT_MODULE = 4128 - SYS_DELETE_MODULE = 4129 - SYS_GET_KERNEL_SYMS = 4130 - SYS_QUOTACTL = 4131 - SYS_GETPGID = 4132 - SYS_FCHDIR = 4133 - SYS_BDFLUSH = 4134 - SYS_SYSFS = 4135 - SYS_PERSONALITY = 4136 - SYS_AFS_SYSCALL = 4137 - SYS_SETFSUID = 4138 - SYS_SETFSGID = 4139 - SYS__LLSEEK = 4140 - SYS_GETDENTS = 4141 - SYS__NEWSELECT = 4142 - SYS_FLOCK = 4143 - SYS_MSYNC = 4144 - SYS_READV = 4145 - SYS_WRITEV = 4146 - SYS_CACHEFLUSH = 4147 - SYS_CACHECTL = 4148 - SYS_SYSMIPS = 4149 - SYS_UNUSED150 = 4150 - SYS_GETSID = 4151 - SYS_FDATASYNC = 4152 - SYS__SYSCTL = 4153 - SYS_MLOCK = 4154 - SYS_MUNLOCK = 4155 - SYS_MLOCKALL = 4156 - SYS_MUNLOCKALL = 4157 - SYS_SCHED_SETPARAM = 4158 - SYS_SCHED_GETPARAM = 4159 - SYS_SCHED_SETSCHEDULER = 4160 - SYS_SCHED_GETSCHEDULER = 4161 - SYS_SCHED_YIELD = 4162 - SYS_SCHED_GET_PRIORITY_MAX = 4163 - SYS_SCHED_GET_PRIORITY_MIN = 4164 - SYS_SCHED_RR_GET_INTERVAL = 4165 - SYS_NANOSLEEP = 4166 - SYS_MREMAP = 4167 - SYS_ACCEPT = 4168 - SYS_BIND = 4169 - SYS_CONNECT = 4170 - SYS_GETPEERNAME = 4171 - SYS_GETSOCKNAME = 4172 - SYS_GETSOCKOPT = 4173 - SYS_LISTEN = 4174 - SYS_RECV = 4175 - SYS_RECVFROM = 4176 - SYS_RECVMSG = 4177 - SYS_SEND = 4178 - SYS_SENDMSG = 4179 - SYS_SENDTO = 4180 - SYS_SETSOCKOPT = 4181 - SYS_SHUTDOWN = 4182 - SYS_SOCKET = 4183 - SYS_SOCKETPAIR = 4184 - SYS_SETRESUID = 4185 - SYS_GETRESUID = 4186 - SYS_QUERY_MODULE = 4187 - 
SYS_POLL = 4188 - SYS_NFSSERVCTL = 4189 - SYS_SETRESGID = 4190 - SYS_GETRESGID = 4191 - SYS_PRCTL = 4192 - SYS_RT_SIGRETURN = 4193 - SYS_RT_SIGACTION = 4194 - SYS_RT_SIGPROCMASK = 4195 - SYS_RT_SIGPENDING = 4196 - SYS_RT_SIGTIMEDWAIT = 4197 - SYS_RT_SIGQUEUEINFO = 4198 - SYS_RT_SIGSUSPEND = 4199 - SYS_PREAD64 = 4200 - SYS_PWRITE64 = 4201 - SYS_CHOWN = 4202 - SYS_GETCWD = 4203 - SYS_CAPGET = 4204 - SYS_CAPSET = 4205 - SYS_SIGALTSTACK = 4206 - SYS_SENDFILE = 4207 - SYS_GETPMSG = 4208 - SYS_PUTPMSG = 4209 - SYS_MMAP2 = 4210 - SYS_TRUNCATE64 = 4211 - SYS_FTRUNCATE64 = 4212 - SYS_STAT64 = 4213 - SYS_LSTAT64 = 4214 - SYS_FSTAT64 = 4215 - SYS_PIVOT_ROOT = 4216 - SYS_MINCORE = 4217 - SYS_MADVISE = 4218 - SYS_GETDENTS64 = 4219 - SYS_FCNTL64 = 4220 - SYS_RESERVED221 = 4221 - SYS_GETTID = 4222 - SYS_READAHEAD = 4223 - SYS_SETXATTR = 4224 - SYS_LSETXATTR = 4225 - SYS_FSETXATTR = 4226 - SYS_GETXATTR = 4227 - SYS_LGETXATTR = 4228 - SYS_FGETXATTR = 4229 - SYS_LISTXATTR = 4230 - SYS_LLISTXATTR = 4231 - SYS_FLISTXATTR = 4232 - SYS_REMOVEXATTR = 4233 - SYS_LREMOVEXATTR = 4234 - SYS_FREMOVEXATTR = 4235 - SYS_TKILL = 4236 - SYS_SENDFILE64 = 4237 - SYS_FUTEX = 4238 - SYS_SCHED_SETAFFINITY = 4239 - SYS_SCHED_GETAFFINITY = 4240 - SYS_IO_SETUP = 4241 - SYS_IO_DESTROY = 4242 - SYS_IO_GETEVENTS = 4243 - SYS_IO_SUBMIT = 4244 - SYS_IO_CANCEL = 4245 - SYS_EXIT_GROUP = 4246 - SYS_LOOKUP_DCOOKIE = 4247 - SYS_EPOLL_CREATE = 4248 - SYS_EPOLL_CTL = 4249 - SYS_EPOLL_WAIT = 4250 - SYS_REMAP_FILE_PAGES = 4251 - SYS_SET_TID_ADDRESS = 4252 - SYS_RESTART_SYSCALL = 4253 - SYS_FADVISE64 = 4254 - SYS_STATFS64 = 4255 - SYS_FSTATFS64 = 4256 - SYS_TIMER_CREATE = 4257 - SYS_TIMER_SETTIME = 4258 - SYS_TIMER_GETTIME = 4259 - SYS_TIMER_GETOVERRUN = 4260 - SYS_TIMER_DELETE = 4261 - SYS_CLOCK_SETTIME = 4262 - SYS_CLOCK_GETTIME = 4263 - SYS_CLOCK_GETRES = 4264 - SYS_CLOCK_NANOSLEEP = 4265 - SYS_TGKILL = 4266 - SYS_UTIMES = 4267 - SYS_MBIND = 4268 - SYS_GET_MEMPOLICY = 4269 - SYS_SET_MEMPOLICY = 4270 - SYS_MQ_OPEN = 4271 - SYS_MQ_UNLINK = 4272 - SYS_MQ_TIMEDSEND = 4273 - SYS_MQ_TIMEDRECEIVE = 4274 - SYS_MQ_NOTIFY = 4275 - SYS_MQ_GETSETATTR = 4276 - SYS_VSERVER = 4277 - SYS_WAITID = 4278 - SYS_ADD_KEY = 4280 - SYS_REQUEST_KEY = 4281 - SYS_KEYCTL = 4282 - SYS_SET_THREAD_AREA = 4283 - SYS_INOTIFY_INIT = 4284 - SYS_INOTIFY_ADD_WATCH = 4285 - SYS_INOTIFY_RM_WATCH = 4286 - SYS_MIGRATE_PAGES = 4287 - SYS_OPENAT = 4288 - SYS_MKDIRAT = 4289 - SYS_MKNODAT = 4290 - SYS_FCHOWNAT = 4291 - SYS_FUTIMESAT = 4292 - SYS_FSTATAT64 = 4293 - SYS_UNLINKAT = 4294 - SYS_RENAMEAT = 4295 - SYS_LINKAT = 4296 - SYS_SYMLINKAT = 4297 - SYS_READLINKAT = 4298 - SYS_FCHMODAT = 4299 - SYS_FACCESSAT = 4300 - SYS_PSELECT6 = 4301 - SYS_PPOLL = 4302 - SYS_UNSHARE = 4303 - SYS_SPLICE = 4304 - SYS_SYNC_FILE_RANGE = 4305 - SYS_TEE = 4306 - SYS_VMSPLICE = 4307 - SYS_MOVE_PAGES = 4308 - SYS_SET_ROBUST_LIST = 4309 - SYS_GET_ROBUST_LIST = 4310 - SYS_KEXEC_LOAD = 4311 - SYS_GETCPU = 4312 - SYS_EPOLL_PWAIT = 4313 - SYS_IOPRIO_SET = 4314 - SYS_IOPRIO_GET = 4315 - SYS_UTIMENSAT = 4316 - SYS_SIGNALFD = 4317 - SYS_TIMERFD = 4318 - SYS_EVENTFD = 4319 - SYS_FALLOCATE = 4320 - SYS_TIMERFD_CREATE = 4321 - SYS_TIMERFD_GETTIME = 4322 - SYS_TIMERFD_SETTIME = 4323 - SYS_SIGNALFD4 = 4324 - SYS_EVENTFD2 = 4325 - SYS_EPOLL_CREATE1 = 4326 - SYS_DUP3 = 4327 - SYS_PIPE2 = 4328 - SYS_INOTIFY_INIT1 = 4329 - SYS_PREADV = 4330 - SYS_PWRITEV = 4331 - SYS_RT_TGSIGQUEUEINFO = 4332 - SYS_PERF_EVENT_OPEN = 4333 - SYS_ACCEPT4 = 4334 - SYS_RECVMMSG = 4335 - SYS_FANOTIFY_INIT = 4336 - SYS_FANOTIFY_MARK = 4337 - 
SYS_PRLIMIT64 = 4338 - SYS_NAME_TO_HANDLE_AT = 4339 - SYS_OPEN_BY_HANDLE_AT = 4340 - SYS_CLOCK_ADJTIME = 4341 - SYS_SYNCFS = 4342 - SYS_SENDMMSG = 4343 - SYS_SETNS = 4344 - SYS_PROCESS_VM_READV = 4345 - SYS_PROCESS_VM_WRITEV = 4346 - SYS_KCMP = 4347 - SYS_FINIT_MODULE = 4348 - SYS_SCHED_SETATTR = 4349 - SYS_SCHED_GETATTR = 4350 - SYS_RENAMEAT2 = 4351 - SYS_SECCOMP = 4352 - SYS_GETRANDOM = 4353 - SYS_MEMFD_CREATE = 4354 - SYS_BPF = 4355 - SYS_EXECVEAT = 4356 - SYS_USERFAULTFD = 4357 - SYS_MEMBARRIER = 4358 - SYS_MLOCK2 = 4359 - SYS_COPY_FILE_RANGE = 4360 - SYS_PREADV2 = 4361 - SYS_PWRITEV2 = 4362 - SYS_PKEY_MPROTECT = 4363 - SYS_PKEY_ALLOC = 4364 - SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 - SYS_RSEQ = 4367 - SYS_IO_PGETEVENTS = 4368 - SYS_SEMGET = 4393 - SYS_SEMCTL = 4394 - SYS_SHMGET = 4395 - SYS_SHMCTL = 4396 - SYS_SHMAT = 4397 - SYS_SHMDT = 4398 - SYS_MSGGET = 4399 - SYS_MSGSND = 4400 - SYS_MSGRCV = 4401 - SYS_MSGCTL = 4402 - SYS_CLOCK_GETTIME64 = 4403 - SYS_CLOCK_SETTIME64 = 4404 - SYS_CLOCK_ADJTIME64 = 4405 - SYS_CLOCK_GETRES_TIME64 = 4406 - SYS_CLOCK_NANOSLEEP_TIME64 = 4407 - SYS_TIMER_GETTIME64 = 4408 - SYS_TIMER_SETTIME64 = 4409 - SYS_TIMERFD_GETTIME64 = 4410 - SYS_TIMERFD_SETTIME64 = 4411 - SYS_UTIMENSAT_TIME64 = 4412 - SYS_PSELECT6_TIME64 = 4413 - SYS_PPOLL_TIME64 = 4414 - SYS_IO_PGETEVENTS_TIME64 = 4416 - SYS_RECVMMSG_TIME64 = 4417 - SYS_MQ_TIMEDSEND_TIME64 = 4418 - SYS_MQ_TIMEDRECEIVE_TIME64 = 4419 - SYS_SEMTIMEDOP_TIME64 = 4420 - SYS_RT_SIGTIMEDWAIT_TIME64 = 4421 - SYS_FUTEX_TIME64 = 4422 - SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423 - SYS_PIDFD_SEND_SIGNAL = 4424 - SYS_IO_URING_SETUP = 4425 - SYS_IO_URING_ENTER = 4426 - SYS_IO_URING_REGISTER = 4427 + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR = 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 
4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + 
SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 57ec82aac4..40164cacdf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -334,8 +334,4 @@ const ( SYS_STATX = 5326 SYS_RSEQ = 5327 SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 825a3e3b02..8a909738bc 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -334,8 +334,4 @@ const ( SYS_STATX = 5326 SYS_RSEQ = 5327 SYS_IO_PGETEVENTS = 5328 - SYS_PIDFD_SEND_SIGNAL = 5424 - SYS_IO_URING_SETUP = 5425 - SYS_IO_URING_ENTER = 5426 - SYS_IO_URING_REGISTER = 5427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index f152dfdd05..8d78184224 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -6,406 +6,372 @@ package unix const ( - SYS_SYSCALL = 4000 - SYS_EXIT = 4001 - SYS_FORK = 4002 - SYS_READ = 4003 - SYS_WRITE = 4004 - SYS_OPEN = 4005 - SYS_CLOSE = 4006 - SYS_WAITPID = 4007 - SYS_CREAT = 4008 - SYS_LINK = 4009 - SYS_UNLINK = 4010 - SYS_EXECVE = 4011 - SYS_CHDIR = 4012 - SYS_TIME = 4013 - SYS_MKNOD = 4014 - SYS_CHMOD = 4015 - SYS_LCHOWN = 4016 - SYS_BREAK = 4017 - SYS_UNUSED18 = 4018 - SYS_LSEEK = 4019 - SYS_GETPID = 4020 - SYS_MOUNT = 4021 - SYS_UMOUNT = 4022 - SYS_SETUID = 4023 - SYS_GETUID = 4024 - SYS_STIME = 4025 - SYS_PTRACE = 4026 - SYS_ALARM = 4027 - SYS_UNUSED28 = 4028 - SYS_PAUSE = 4029 - SYS_UTIME = 4030 - SYS_STTY = 4031 - SYS_GTTY = 4032 - SYS_ACCESS = 4033 - SYS_NICE = 4034 - SYS_FTIME = 4035 - SYS_SYNC = 4036 - SYS_KILL = 4037 - SYS_RENAME = 4038 - SYS_MKDIR = 4039 - SYS_RMDIR = 4040 - SYS_DUP = 4041 - SYS_PIPE = 4042 - SYS_TIMES = 4043 - SYS_PROF = 4044 - SYS_BRK = 4045 - SYS_SETGID = 4046 - SYS_GETGID = 4047 - SYS_SIGNAL = 4048 - SYS_GETEUID = 4049 - SYS_GETEGID = 4050 - SYS_ACCT = 4051 - SYS_UMOUNT2 = 4052 - SYS_LOCK = 4053 - SYS_IOCTL = 4054 - SYS_FCNTL = 4055 - SYS_MPX = 4056 - SYS_SETPGID = 4057 - SYS_ULIMIT = 4058 - SYS_UNUSED59 = 4059 - SYS_UMASK = 4060 - SYS_CHROOT = 4061 - SYS_USTAT = 4062 - SYS_DUP2 = 4063 - SYS_GETPPID = 4064 - SYS_GETPGRP = 4065 - SYS_SETSID = 4066 - SYS_SIGACTION = 4067 - SYS_SGETMASK = 4068 - SYS_SSETMASK = 4069 - SYS_SETREUID = 4070 - SYS_SETREGID = 4071 - SYS_SIGSUSPEND = 4072 - SYS_SIGPENDING = 4073 - SYS_SETHOSTNAME = 4074 - SYS_SETRLIMIT = 4075 - SYS_GETRLIMIT = 4076 - SYS_GETRUSAGE = 4077 - SYS_GETTIMEOFDAY = 4078 - SYS_SETTIMEOFDAY = 4079 - SYS_GETGROUPS = 4080 - SYS_SETGROUPS = 4081 - SYS_RESERVED82 = 4082 - SYS_SYMLINK = 4083 - SYS_UNUSED84 = 4084 - SYS_READLINK = 4085 - SYS_USELIB = 4086 - SYS_SWAPON = 4087 - SYS_REBOOT = 4088 - SYS_READDIR = 4089 - SYS_MMAP = 4090 - SYS_MUNMAP = 4091 - SYS_TRUNCATE = 4092 - SYS_FTRUNCATE = 4093 - SYS_FCHMOD = 4094 - SYS_FCHOWN = 4095 - SYS_GETPRIORITY = 4096 - SYS_SETPRIORITY = 4097 - SYS_PROFIL = 4098 - SYS_STATFS = 4099 - SYS_FSTATFS = 4100 - SYS_IOPERM = 4101 - SYS_SOCKETCALL = 4102 - SYS_SYSLOG = 4103 - SYS_SETITIMER = 4104 - SYS_GETITIMER = 4105 - SYS_STAT = 4106 - SYS_LSTAT = 4107 - SYS_FSTAT = 4108 - SYS_UNUSED109 = 4109 - SYS_IOPL = 4110 - SYS_VHANGUP = 4111 - SYS_IDLE = 4112 - SYS_VM86 = 4113 - SYS_WAIT4 = 4114 - SYS_SWAPOFF = 4115 - SYS_SYSINFO = 4116 - SYS_IPC = 4117 - SYS_FSYNC = 4118 - SYS_SIGRETURN = 4119 - SYS_CLONE = 4120 - SYS_SETDOMAINNAME = 4121 - SYS_UNAME = 4122 - SYS_MODIFY_LDT = 4123 - SYS_ADJTIMEX = 4124 - SYS_MPROTECT = 4125 - SYS_SIGPROCMASK = 4126 - SYS_CREATE_MODULE = 4127 - SYS_INIT_MODULE = 4128 - SYS_DELETE_MODULE = 4129 - SYS_GET_KERNEL_SYMS = 4130 - SYS_QUOTACTL = 4131 - SYS_GETPGID = 4132 - SYS_FCHDIR = 4133 - SYS_BDFLUSH = 4134 - SYS_SYSFS = 4135 - SYS_PERSONALITY = 4136 - SYS_AFS_SYSCALL = 4137 - SYS_SETFSUID = 4138 - SYS_SETFSGID = 4139 - SYS__LLSEEK = 4140 
- SYS_GETDENTS = 4141 - SYS__NEWSELECT = 4142 - SYS_FLOCK = 4143 - SYS_MSYNC = 4144 - SYS_READV = 4145 - SYS_WRITEV = 4146 - SYS_CACHEFLUSH = 4147 - SYS_CACHECTL = 4148 - SYS_SYSMIPS = 4149 - SYS_UNUSED150 = 4150 - SYS_GETSID = 4151 - SYS_FDATASYNC = 4152 - SYS__SYSCTL = 4153 - SYS_MLOCK = 4154 - SYS_MUNLOCK = 4155 - SYS_MLOCKALL = 4156 - SYS_MUNLOCKALL = 4157 - SYS_SCHED_SETPARAM = 4158 - SYS_SCHED_GETPARAM = 4159 - SYS_SCHED_SETSCHEDULER = 4160 - SYS_SCHED_GETSCHEDULER = 4161 - SYS_SCHED_YIELD = 4162 - SYS_SCHED_GET_PRIORITY_MAX = 4163 - SYS_SCHED_GET_PRIORITY_MIN = 4164 - SYS_SCHED_RR_GET_INTERVAL = 4165 - SYS_NANOSLEEP = 4166 - SYS_MREMAP = 4167 - SYS_ACCEPT = 4168 - SYS_BIND = 4169 - SYS_CONNECT = 4170 - SYS_GETPEERNAME = 4171 - SYS_GETSOCKNAME = 4172 - SYS_GETSOCKOPT = 4173 - SYS_LISTEN = 4174 - SYS_RECV = 4175 - SYS_RECVFROM = 4176 - SYS_RECVMSG = 4177 - SYS_SEND = 4178 - SYS_SENDMSG = 4179 - SYS_SENDTO = 4180 - SYS_SETSOCKOPT = 4181 - SYS_SHUTDOWN = 4182 - SYS_SOCKET = 4183 - SYS_SOCKETPAIR = 4184 - SYS_SETRESUID = 4185 - SYS_GETRESUID = 4186 - SYS_QUERY_MODULE = 4187 - SYS_POLL = 4188 - SYS_NFSSERVCTL = 4189 - SYS_SETRESGID = 4190 - SYS_GETRESGID = 4191 - SYS_PRCTL = 4192 - SYS_RT_SIGRETURN = 4193 - SYS_RT_SIGACTION = 4194 - SYS_RT_SIGPROCMASK = 4195 - SYS_RT_SIGPENDING = 4196 - SYS_RT_SIGTIMEDWAIT = 4197 - SYS_RT_SIGQUEUEINFO = 4198 - SYS_RT_SIGSUSPEND = 4199 - SYS_PREAD64 = 4200 - SYS_PWRITE64 = 4201 - SYS_CHOWN = 4202 - SYS_GETCWD = 4203 - SYS_CAPGET = 4204 - SYS_CAPSET = 4205 - SYS_SIGALTSTACK = 4206 - SYS_SENDFILE = 4207 - SYS_GETPMSG = 4208 - SYS_PUTPMSG = 4209 - SYS_MMAP2 = 4210 - SYS_TRUNCATE64 = 4211 - SYS_FTRUNCATE64 = 4212 - SYS_STAT64 = 4213 - SYS_LSTAT64 = 4214 - SYS_FSTAT64 = 4215 - SYS_PIVOT_ROOT = 4216 - SYS_MINCORE = 4217 - SYS_MADVISE = 4218 - SYS_GETDENTS64 = 4219 - SYS_FCNTL64 = 4220 - SYS_RESERVED221 = 4221 - SYS_GETTID = 4222 - SYS_READAHEAD = 4223 - SYS_SETXATTR = 4224 - SYS_LSETXATTR = 4225 - SYS_FSETXATTR = 4226 - SYS_GETXATTR = 4227 - SYS_LGETXATTR = 4228 - SYS_FGETXATTR = 4229 - SYS_LISTXATTR = 4230 - SYS_LLISTXATTR = 4231 - SYS_FLISTXATTR = 4232 - SYS_REMOVEXATTR = 4233 - SYS_LREMOVEXATTR = 4234 - SYS_FREMOVEXATTR = 4235 - SYS_TKILL = 4236 - SYS_SENDFILE64 = 4237 - SYS_FUTEX = 4238 - SYS_SCHED_SETAFFINITY = 4239 - SYS_SCHED_GETAFFINITY = 4240 - SYS_IO_SETUP = 4241 - SYS_IO_DESTROY = 4242 - SYS_IO_GETEVENTS = 4243 - SYS_IO_SUBMIT = 4244 - SYS_IO_CANCEL = 4245 - SYS_EXIT_GROUP = 4246 - SYS_LOOKUP_DCOOKIE = 4247 - SYS_EPOLL_CREATE = 4248 - SYS_EPOLL_CTL = 4249 - SYS_EPOLL_WAIT = 4250 - SYS_REMAP_FILE_PAGES = 4251 - SYS_SET_TID_ADDRESS = 4252 - SYS_RESTART_SYSCALL = 4253 - SYS_FADVISE64 = 4254 - SYS_STATFS64 = 4255 - SYS_FSTATFS64 = 4256 - SYS_TIMER_CREATE = 4257 - SYS_TIMER_SETTIME = 4258 - SYS_TIMER_GETTIME = 4259 - SYS_TIMER_GETOVERRUN = 4260 - SYS_TIMER_DELETE = 4261 - SYS_CLOCK_SETTIME = 4262 - SYS_CLOCK_GETTIME = 4263 - SYS_CLOCK_GETRES = 4264 - SYS_CLOCK_NANOSLEEP = 4265 - SYS_TGKILL = 4266 - SYS_UTIMES = 4267 - SYS_MBIND = 4268 - SYS_GET_MEMPOLICY = 4269 - SYS_SET_MEMPOLICY = 4270 - SYS_MQ_OPEN = 4271 - SYS_MQ_UNLINK = 4272 - SYS_MQ_TIMEDSEND = 4273 - SYS_MQ_TIMEDRECEIVE = 4274 - SYS_MQ_NOTIFY = 4275 - SYS_MQ_GETSETATTR = 4276 - SYS_VSERVER = 4277 - SYS_WAITID = 4278 - SYS_ADD_KEY = 4280 - SYS_REQUEST_KEY = 4281 - SYS_KEYCTL = 4282 - SYS_SET_THREAD_AREA = 4283 - SYS_INOTIFY_INIT = 4284 - SYS_INOTIFY_ADD_WATCH = 4285 - SYS_INOTIFY_RM_WATCH = 4286 - SYS_MIGRATE_PAGES = 4287 - SYS_OPENAT = 4288 - SYS_MKDIRAT = 4289 - SYS_MKNODAT = 4290 - SYS_FCHOWNAT = 
4291 - SYS_FUTIMESAT = 4292 - SYS_FSTATAT64 = 4293 - SYS_UNLINKAT = 4294 - SYS_RENAMEAT = 4295 - SYS_LINKAT = 4296 - SYS_SYMLINKAT = 4297 - SYS_READLINKAT = 4298 - SYS_FCHMODAT = 4299 - SYS_FACCESSAT = 4300 - SYS_PSELECT6 = 4301 - SYS_PPOLL = 4302 - SYS_UNSHARE = 4303 - SYS_SPLICE = 4304 - SYS_SYNC_FILE_RANGE = 4305 - SYS_TEE = 4306 - SYS_VMSPLICE = 4307 - SYS_MOVE_PAGES = 4308 - SYS_SET_ROBUST_LIST = 4309 - SYS_GET_ROBUST_LIST = 4310 - SYS_KEXEC_LOAD = 4311 - SYS_GETCPU = 4312 - SYS_EPOLL_PWAIT = 4313 - SYS_IOPRIO_SET = 4314 - SYS_IOPRIO_GET = 4315 - SYS_UTIMENSAT = 4316 - SYS_SIGNALFD = 4317 - SYS_TIMERFD = 4318 - SYS_EVENTFD = 4319 - SYS_FALLOCATE = 4320 - SYS_TIMERFD_CREATE = 4321 - SYS_TIMERFD_GETTIME = 4322 - SYS_TIMERFD_SETTIME = 4323 - SYS_SIGNALFD4 = 4324 - SYS_EVENTFD2 = 4325 - SYS_EPOLL_CREATE1 = 4326 - SYS_DUP3 = 4327 - SYS_PIPE2 = 4328 - SYS_INOTIFY_INIT1 = 4329 - SYS_PREADV = 4330 - SYS_PWRITEV = 4331 - SYS_RT_TGSIGQUEUEINFO = 4332 - SYS_PERF_EVENT_OPEN = 4333 - SYS_ACCEPT4 = 4334 - SYS_RECVMMSG = 4335 - SYS_FANOTIFY_INIT = 4336 - SYS_FANOTIFY_MARK = 4337 - SYS_PRLIMIT64 = 4338 - SYS_NAME_TO_HANDLE_AT = 4339 - SYS_OPEN_BY_HANDLE_AT = 4340 - SYS_CLOCK_ADJTIME = 4341 - SYS_SYNCFS = 4342 - SYS_SENDMMSG = 4343 - SYS_SETNS = 4344 - SYS_PROCESS_VM_READV = 4345 - SYS_PROCESS_VM_WRITEV = 4346 - SYS_KCMP = 4347 - SYS_FINIT_MODULE = 4348 - SYS_SCHED_SETATTR = 4349 - SYS_SCHED_GETATTR = 4350 - SYS_RENAMEAT2 = 4351 - SYS_SECCOMP = 4352 - SYS_GETRANDOM = 4353 - SYS_MEMFD_CREATE = 4354 - SYS_BPF = 4355 - SYS_EXECVEAT = 4356 - SYS_USERFAULTFD = 4357 - SYS_MEMBARRIER = 4358 - SYS_MLOCK2 = 4359 - SYS_COPY_FILE_RANGE = 4360 - SYS_PREADV2 = 4361 - SYS_PWRITEV2 = 4362 - SYS_PKEY_MPROTECT = 4363 - SYS_PKEY_ALLOC = 4364 - SYS_PKEY_FREE = 4365 - SYS_STATX = 4366 - SYS_RSEQ = 4367 - SYS_IO_PGETEVENTS = 4368 - SYS_SEMGET = 4393 - SYS_SEMCTL = 4394 - SYS_SHMGET = 4395 - SYS_SHMCTL = 4396 - SYS_SHMAT = 4397 - SYS_SHMDT = 4398 - SYS_MSGGET = 4399 - SYS_MSGSND = 4400 - SYS_MSGRCV = 4401 - SYS_MSGCTL = 4402 - SYS_CLOCK_GETTIME64 = 4403 - SYS_CLOCK_SETTIME64 = 4404 - SYS_CLOCK_ADJTIME64 = 4405 - SYS_CLOCK_GETRES_TIME64 = 4406 - SYS_CLOCK_NANOSLEEP_TIME64 = 4407 - SYS_TIMER_GETTIME64 = 4408 - SYS_TIMER_SETTIME64 = 4409 - SYS_TIMERFD_GETTIME64 = 4410 - SYS_TIMERFD_SETTIME64 = 4411 - SYS_UTIMENSAT_TIME64 = 4412 - SYS_PSELECT6_TIME64 = 4413 - SYS_PPOLL_TIME64 = 4414 - SYS_IO_PGETEVENTS_TIME64 = 4416 - SYS_RECVMMSG_TIME64 = 4417 - SYS_MQ_TIMEDSEND_TIME64 = 4418 - SYS_MQ_TIMEDRECEIVE_TIME64 = 4419 - SYS_SEMTIMEDOP_TIME64 = 4420 - SYS_RT_SIGTIMEDWAIT_TIME64 = 4421 - SYS_FUTEX_TIME64 = 4422 - SYS_SCHED_RR_GET_INTERVAL_TIME64 = 4423 - SYS_PIDFD_SEND_SIGNAL = 4424 - SYS_IO_URING_SETUP = 4425 - SYS_IO_URING_ENTER = 4426 - SYS_IO_URING_REGISTER = 4427 + SYS_SYSCALL = 4000 + SYS_EXIT = 4001 + SYS_FORK = 4002 + SYS_READ = 4003 + SYS_WRITE = 4004 + SYS_OPEN = 4005 + SYS_CLOSE = 4006 + SYS_WAITPID = 4007 + SYS_CREAT = 4008 + SYS_LINK = 4009 + SYS_UNLINK = 4010 + SYS_EXECVE = 4011 + SYS_CHDIR = 4012 + SYS_TIME = 4013 + SYS_MKNOD = 4014 + SYS_CHMOD = 4015 + SYS_LCHOWN = 4016 + SYS_BREAK = 4017 + SYS_UNUSED18 = 4018 + SYS_LSEEK = 4019 + SYS_GETPID = 4020 + SYS_MOUNT = 4021 + SYS_UMOUNT = 4022 + SYS_SETUID = 4023 + SYS_GETUID = 4024 + SYS_STIME = 4025 + SYS_PTRACE = 4026 + SYS_ALARM = 4027 + SYS_UNUSED28 = 4028 + SYS_PAUSE = 4029 + SYS_UTIME = 4030 + SYS_STTY = 4031 + SYS_GTTY = 4032 + SYS_ACCESS = 4033 + SYS_NICE = 4034 + SYS_FTIME = 4035 + SYS_SYNC = 4036 + SYS_KILL = 4037 + SYS_RENAME = 4038 + SYS_MKDIR = 4039 + SYS_RMDIR 
= 4040 + SYS_DUP = 4041 + SYS_PIPE = 4042 + SYS_TIMES = 4043 + SYS_PROF = 4044 + SYS_BRK = 4045 + SYS_SETGID = 4046 + SYS_GETGID = 4047 + SYS_SIGNAL = 4048 + SYS_GETEUID = 4049 + SYS_GETEGID = 4050 + SYS_ACCT = 4051 + SYS_UMOUNT2 = 4052 + SYS_LOCK = 4053 + SYS_IOCTL = 4054 + SYS_FCNTL = 4055 + SYS_MPX = 4056 + SYS_SETPGID = 4057 + SYS_ULIMIT = 4058 + SYS_UNUSED59 = 4059 + SYS_UMASK = 4060 + SYS_CHROOT = 4061 + SYS_USTAT = 4062 + SYS_DUP2 = 4063 + SYS_GETPPID = 4064 + SYS_GETPGRP = 4065 + SYS_SETSID = 4066 + SYS_SIGACTION = 4067 + SYS_SGETMASK = 4068 + SYS_SSETMASK = 4069 + SYS_SETREUID = 4070 + SYS_SETREGID = 4071 + SYS_SIGSUSPEND = 4072 + SYS_SIGPENDING = 4073 + SYS_SETHOSTNAME = 4074 + SYS_SETRLIMIT = 4075 + SYS_GETRLIMIT = 4076 + SYS_GETRUSAGE = 4077 + SYS_GETTIMEOFDAY = 4078 + SYS_SETTIMEOFDAY = 4079 + SYS_GETGROUPS = 4080 + SYS_SETGROUPS = 4081 + SYS_RESERVED82 = 4082 + SYS_SYMLINK = 4083 + SYS_UNUSED84 = 4084 + SYS_READLINK = 4085 + SYS_USELIB = 4086 + SYS_SWAPON = 4087 + SYS_REBOOT = 4088 + SYS_READDIR = 4089 + SYS_MMAP = 4090 + SYS_MUNMAP = 4091 + SYS_TRUNCATE = 4092 + SYS_FTRUNCATE = 4093 + SYS_FCHMOD = 4094 + SYS_FCHOWN = 4095 + SYS_GETPRIORITY = 4096 + SYS_SETPRIORITY = 4097 + SYS_PROFIL = 4098 + SYS_STATFS = 4099 + SYS_FSTATFS = 4100 + SYS_IOPERM = 4101 + SYS_SOCKETCALL = 4102 + SYS_SYSLOG = 4103 + SYS_SETITIMER = 4104 + SYS_GETITIMER = 4105 + SYS_STAT = 4106 + SYS_LSTAT = 4107 + SYS_FSTAT = 4108 + SYS_UNUSED109 = 4109 + SYS_IOPL = 4110 + SYS_VHANGUP = 4111 + SYS_IDLE = 4112 + SYS_VM86 = 4113 + SYS_WAIT4 = 4114 + SYS_SWAPOFF = 4115 + SYS_SYSINFO = 4116 + SYS_IPC = 4117 + SYS_FSYNC = 4118 + SYS_SIGRETURN = 4119 + SYS_CLONE = 4120 + SYS_SETDOMAINNAME = 4121 + SYS_UNAME = 4122 + SYS_MODIFY_LDT = 4123 + SYS_ADJTIMEX = 4124 + SYS_MPROTECT = 4125 + SYS_SIGPROCMASK = 4126 + SYS_CREATE_MODULE = 4127 + SYS_INIT_MODULE = 4128 + SYS_DELETE_MODULE = 4129 + SYS_GET_KERNEL_SYMS = 4130 + SYS_QUOTACTL = 4131 + SYS_GETPGID = 4132 + SYS_FCHDIR = 4133 + SYS_BDFLUSH = 4134 + SYS_SYSFS = 4135 + SYS_PERSONALITY = 4136 + SYS_AFS_SYSCALL = 4137 + SYS_SETFSUID = 4138 + SYS_SETFSGID = 4139 + SYS__LLSEEK = 4140 + SYS_GETDENTS = 4141 + SYS__NEWSELECT = 4142 + SYS_FLOCK = 4143 + SYS_MSYNC = 4144 + SYS_READV = 4145 + SYS_WRITEV = 4146 + SYS_CACHEFLUSH = 4147 + SYS_CACHECTL = 4148 + SYS_SYSMIPS = 4149 + SYS_UNUSED150 = 4150 + SYS_GETSID = 4151 + SYS_FDATASYNC = 4152 + SYS__SYSCTL = 4153 + SYS_MLOCK = 4154 + SYS_MUNLOCK = 4155 + SYS_MLOCKALL = 4156 + SYS_MUNLOCKALL = 4157 + SYS_SCHED_SETPARAM = 4158 + SYS_SCHED_GETPARAM = 4159 + SYS_SCHED_SETSCHEDULER = 4160 + SYS_SCHED_GETSCHEDULER = 4161 + SYS_SCHED_YIELD = 4162 + SYS_SCHED_GET_PRIORITY_MAX = 4163 + SYS_SCHED_GET_PRIORITY_MIN = 4164 + SYS_SCHED_RR_GET_INTERVAL = 4165 + SYS_NANOSLEEP = 4166 + SYS_MREMAP = 4167 + SYS_ACCEPT = 4168 + SYS_BIND = 4169 + SYS_CONNECT = 4170 + SYS_GETPEERNAME = 4171 + SYS_GETSOCKNAME = 4172 + SYS_GETSOCKOPT = 4173 + SYS_LISTEN = 4174 + SYS_RECV = 4175 + SYS_RECVFROM = 4176 + SYS_RECVMSG = 4177 + SYS_SEND = 4178 + SYS_SENDMSG = 4179 + SYS_SENDTO = 4180 + SYS_SETSOCKOPT = 4181 + SYS_SHUTDOWN = 4182 + SYS_SOCKET = 4183 + SYS_SOCKETPAIR = 4184 + SYS_SETRESUID = 4185 + SYS_GETRESUID = 4186 + SYS_QUERY_MODULE = 4187 + SYS_POLL = 4188 + SYS_NFSSERVCTL = 4189 + SYS_SETRESGID = 4190 + SYS_GETRESGID = 4191 + SYS_PRCTL = 4192 + SYS_RT_SIGRETURN = 4193 + SYS_RT_SIGACTION = 4194 + SYS_RT_SIGPROCMASK = 4195 + SYS_RT_SIGPENDING = 4196 + SYS_RT_SIGTIMEDWAIT = 4197 + SYS_RT_SIGQUEUEINFO = 4198 + SYS_RT_SIGSUSPEND = 4199 + SYS_PREAD64 = 4200 + 
SYS_PWRITE64 = 4201 + SYS_CHOWN = 4202 + SYS_GETCWD = 4203 + SYS_CAPGET = 4204 + SYS_CAPSET = 4205 + SYS_SIGALTSTACK = 4206 + SYS_SENDFILE = 4207 + SYS_GETPMSG = 4208 + SYS_PUTPMSG = 4209 + SYS_MMAP2 = 4210 + SYS_TRUNCATE64 = 4211 + SYS_FTRUNCATE64 = 4212 + SYS_STAT64 = 4213 + SYS_LSTAT64 = 4214 + SYS_FSTAT64 = 4215 + SYS_PIVOT_ROOT = 4216 + SYS_MINCORE = 4217 + SYS_MADVISE = 4218 + SYS_GETDENTS64 = 4219 + SYS_FCNTL64 = 4220 + SYS_RESERVED221 = 4221 + SYS_GETTID = 4222 + SYS_READAHEAD = 4223 + SYS_SETXATTR = 4224 + SYS_LSETXATTR = 4225 + SYS_FSETXATTR = 4226 + SYS_GETXATTR = 4227 + SYS_LGETXATTR = 4228 + SYS_FGETXATTR = 4229 + SYS_LISTXATTR = 4230 + SYS_LLISTXATTR = 4231 + SYS_FLISTXATTR = 4232 + SYS_REMOVEXATTR = 4233 + SYS_LREMOVEXATTR = 4234 + SYS_FREMOVEXATTR = 4235 + SYS_TKILL = 4236 + SYS_SENDFILE64 = 4237 + SYS_FUTEX = 4238 + SYS_SCHED_SETAFFINITY = 4239 + SYS_SCHED_GETAFFINITY = 4240 + SYS_IO_SETUP = 4241 + SYS_IO_DESTROY = 4242 + SYS_IO_GETEVENTS = 4243 + SYS_IO_SUBMIT = 4244 + SYS_IO_CANCEL = 4245 + SYS_EXIT_GROUP = 4246 + SYS_LOOKUP_DCOOKIE = 4247 + SYS_EPOLL_CREATE = 4248 + SYS_EPOLL_CTL = 4249 + SYS_EPOLL_WAIT = 4250 + SYS_REMAP_FILE_PAGES = 4251 + SYS_SET_TID_ADDRESS = 4252 + SYS_RESTART_SYSCALL = 4253 + SYS_FADVISE64 = 4254 + SYS_STATFS64 = 4255 + SYS_FSTATFS64 = 4256 + SYS_TIMER_CREATE = 4257 + SYS_TIMER_SETTIME = 4258 + SYS_TIMER_GETTIME = 4259 + SYS_TIMER_GETOVERRUN = 4260 + SYS_TIMER_DELETE = 4261 + SYS_CLOCK_SETTIME = 4262 + SYS_CLOCK_GETTIME = 4263 + SYS_CLOCK_GETRES = 4264 + SYS_CLOCK_NANOSLEEP = 4265 + SYS_TGKILL = 4266 + SYS_UTIMES = 4267 + SYS_MBIND = 4268 + SYS_GET_MEMPOLICY = 4269 + SYS_SET_MEMPOLICY = 4270 + SYS_MQ_OPEN = 4271 + SYS_MQ_UNLINK = 4272 + SYS_MQ_TIMEDSEND = 4273 + SYS_MQ_TIMEDRECEIVE = 4274 + SYS_MQ_NOTIFY = 4275 + SYS_MQ_GETSETATTR = 4276 + SYS_VSERVER = 4277 + SYS_WAITID = 4278 + SYS_ADD_KEY = 4280 + SYS_REQUEST_KEY = 4281 + SYS_KEYCTL = 4282 + SYS_SET_THREAD_AREA = 4283 + SYS_INOTIFY_INIT = 4284 + SYS_INOTIFY_ADD_WATCH = 4285 + SYS_INOTIFY_RM_WATCH = 4286 + SYS_MIGRATE_PAGES = 4287 + SYS_OPENAT = 4288 + SYS_MKDIRAT = 4289 + SYS_MKNODAT = 4290 + SYS_FCHOWNAT = 4291 + SYS_FUTIMESAT = 4292 + SYS_FSTATAT64 = 4293 + SYS_UNLINKAT = 4294 + SYS_RENAMEAT = 4295 + SYS_LINKAT = 4296 + SYS_SYMLINKAT = 4297 + SYS_READLINKAT = 4298 + SYS_FCHMODAT = 4299 + SYS_FACCESSAT = 4300 + SYS_PSELECT6 = 4301 + SYS_PPOLL = 4302 + SYS_UNSHARE = 4303 + SYS_SPLICE = 4304 + SYS_SYNC_FILE_RANGE = 4305 + SYS_TEE = 4306 + SYS_VMSPLICE = 4307 + SYS_MOVE_PAGES = 4308 + SYS_SET_ROBUST_LIST = 4309 + SYS_GET_ROBUST_LIST = 4310 + SYS_KEXEC_LOAD = 4311 + SYS_GETCPU = 4312 + SYS_EPOLL_PWAIT = 4313 + SYS_IOPRIO_SET = 4314 + SYS_IOPRIO_GET = 4315 + SYS_UTIMENSAT = 4316 + SYS_SIGNALFD = 4317 + SYS_TIMERFD = 4318 + SYS_EVENTFD = 4319 + SYS_FALLOCATE = 4320 + SYS_TIMERFD_CREATE = 4321 + SYS_TIMERFD_GETTIME = 4322 + SYS_TIMERFD_SETTIME = 4323 + SYS_SIGNALFD4 = 4324 + SYS_EVENTFD2 = 4325 + SYS_EPOLL_CREATE1 = 4326 + SYS_DUP3 = 4327 + SYS_PIPE2 = 4328 + SYS_INOTIFY_INIT1 = 4329 + SYS_PREADV = 4330 + SYS_PWRITEV = 4331 + SYS_RT_TGSIGQUEUEINFO = 4332 + SYS_PERF_EVENT_OPEN = 4333 + SYS_ACCEPT4 = 4334 + SYS_RECVMMSG = 4335 + SYS_FANOTIFY_INIT = 4336 + SYS_FANOTIFY_MARK = 4337 + SYS_PRLIMIT64 = 4338 + SYS_NAME_TO_HANDLE_AT = 4339 + SYS_OPEN_BY_HANDLE_AT = 4340 + SYS_CLOCK_ADJTIME = 4341 + SYS_SYNCFS = 4342 + SYS_SENDMMSG = 4343 + SYS_SETNS = 4344 + SYS_PROCESS_VM_READV = 4345 + SYS_PROCESS_VM_WRITEV = 4346 + SYS_KCMP = 4347 + SYS_FINIT_MODULE = 4348 + SYS_SCHED_SETATTR = 4349 + SYS_SCHED_GETATTR = 
4350 + SYS_RENAMEAT2 = 4351 + SYS_SECCOMP = 4352 + SYS_GETRANDOM = 4353 + SYS_MEMFD_CREATE = 4354 + SYS_BPF = 4355 + SYS_EXECVEAT = 4356 + SYS_USERFAULTFD = 4357 + SYS_MEMBARRIER = 4358 + SYS_MLOCK2 = 4359 + SYS_COPY_FILE_RANGE = 4360 + SYS_PREADV2 = 4361 + SYS_PWRITEV2 = 4362 + SYS_PKEY_MPROTECT = 4363 + SYS_PKEY_ALLOC = 4364 + SYS_PKEY_FREE = 4365 + SYS_STATX = 4366 + SYS_RSEQ = 4367 + SYS_IO_PGETEVENTS = 4368 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 7cbe78b196..ec5bde3d56 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -372,19 +372,4 @@ const ( SYS_PKEY_MPROTECT = 386 SYS_RSEQ = 387 SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 51a2f1236a..bdbabdbcdb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -372,19 +372,4 @@ const ( SYS_PKEY_MPROTECT = 386 SYS_RSEQ = 387 SYS_IO_PGETEVENTS = 388 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 323432ae30..2c8c46a2fc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -285,8 +285,4 @@ const ( SYS_IO_PGETEVENTS = 292 SYS_RSEQ = 293 SYS_KEXEC_FILE_LOAD = 294 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 9dca974849..6eb7c257f8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -334,22 +334,4 @@ const ( SYS_KEXEC_FILE_LOAD = 381 SYS_IO_PGETEVENTS = 382 SYS_RSEQ = 383 - SYS_PKEY_MPROTECT = 384 - SYS_PKEY_ALLOC = 385 - SYS_PKEY_FREE = 386 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 - SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index d3da46f0de..6ed306370a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -348,23 +348,4 @@ const ( SYS_PWRITEV2 = 359 SYS_STATX = 360 SYS_IO_PGETEVENTS = 361 - SYS_PKEY_MPROTECT = 362 - SYS_PKEY_ALLOC = 363 - SYS_PKEY_FREE = 364 - SYS_RSEQ = 365 - SYS_SEMTIMEDOP = 392 - SYS_SEMGET = 393 
- SYS_SEMCTL = 394 - SYS_SHMGET = 395 - SYS_SHMCTL = 396 - SYS_SHMAT = 397 - SYS_SHMDT = 398 - SYS_MSGGET = 399 - SYS_MSGSND = 400 - SYS_MSGRCV = 401 - SYS_MSGCTL = 402 - SYS_PIDFD_SEND_SIGNAL = 424 - SYS_IO_URING_SETUP = 425 - SYS_IO_URING_ENTER = 426 - SYS_IO_URING_REGISTER = 427 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 2c1f815e6f..cedc9b0f26 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -30,6 +30,11 @@ type Timespec struct { Nsec int32 } +type StTimespec struct { + Sec int32 + Nsec int32 +} + type Timeval struct { Sec int32 Usec int32 @@ -96,9 +101,9 @@ type Stat_t struct { Gid uint32 Rdev uint32 Size int32 - Atim Timespec - Mtim Timespec - Ctim Timespec + Atim StTimespec + Mtim StTimespec + Ctim StTimespec Blksize int32 Blocks int32 Vfstype int32 @@ -143,17 +148,6 @@ type RawSockaddrUnix struct { Path [1023]uint8 } -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [120]uint8 -} - type RawSockaddr struct { Len uint8 Family uint8 @@ -213,18 +207,17 @@ type Msghdr struct { } const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x404 - SizeofSockaddrUnix = 0x401 - SizeofSockaddrDatalink = 0x80 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index b4a069ecbd..904359f69f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -30,6 +30,12 @@ type Timespec struct { Nsec int64 } +type StTimespec struct { + Sec int64 + Nsec int32 + _ [4]byte +} + type Timeval struct { Sec int64 Usec int32 @@ -97,9 +103,9 @@ type Stat_t struct { Gid uint32 Rdev uint64 Ssize int32 - Atim Timespec - Mtim Timespec - Ctim Timespec + Atim StTimespec + Mtim StTimespec + Ctim StTimespec Blksize int64 Blocks int64 Vfstype int32 @@ -147,17 +153,6 @@ type RawSockaddrUnix struct { Path [1023]uint8 } -type RawSockaddrDatalink struct { - Len uint8 - Family uint8 - Index uint16 - Type uint8 - Nlen uint8 - Alen uint8 - Slen uint8 - Data [120]uint8 -} - type RawSockaddr struct { Len uint8 Family uint8 @@ -217,18 +212,17 @@ type Msghdr struct { } const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x404 - SizeofSockaddrUnix = 0x401 - SizeofSockaddrDatalink = 0x80 - SizeofLinger = 0x8 - SizeofIPMreq = 0x8 - SizeofIPv6Mreq = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 9f47b87c50..cefe2f8eae 100644 
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -59,24 +59,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 966798a870..c6bb883c39 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -63,25 +63,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - _ [4]byte - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + _ [4]byte + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 4fe4c9cd73..94c23bc2d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -60,24 +60,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare [2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 21999e4b0a..c82a930cdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -63,25 +63,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev int32 - Mode uint16 - Nlink uint16 - Ino uint64 - Uid uint32 - Gid uint32 - Rdev int32 - _ [4]byte - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare [2]int64 + Dev int32 + Mode uint16 + Nlink uint16 + Ino uint64 + Uid uint32 + Gid uint32 + Rdev int32 + _ [4]byte + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare 
[2]int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index c206f2b053..7b34e2e2c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -57,25 +57,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Ino uint64 - Nlink uint32 - Dev uint32 - Mode uint16 - _1 uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Lspare int32 - Qspare1 int64 - Qspare2 int64 + Ino uint64 + Nlink uint32 + Dev uint32 + Mode uint16 + Padding1 uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Lspare int32 + Qspare1 int64 + Qspare2 int64 } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 0edc5409a0..c146c1ad35 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -62,50 +62,50 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - _ int32 - Atim Timespec - _ int32 - Mtim Timespec - _ int32 - Ctim Timespec - _ int32 - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim_ext int32 + Atim Timespec + Mtim_ext int32 + Mtim Timespec + Ctim_ext int32 + Ctim Timespec + Btim_ext int32 + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec - _ [8]byte + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec + _ [8]byte } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 8881ce8428..ac33a8dd4a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -62,45 +62,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - 
Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index fc713999cc..e27511a642 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -64,45 +64,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 5a0753ee4b..2aadc1a4d8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -62,45 +62,45 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Nlink uint64 - Mode uint16 - _0 int16 - Uid uint32 - Gid uint32 - _1 int32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint64 - Spare [10]uint64 + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Birthtim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 } type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec + Dev uint32 + Ino uint32 + Mode uint16 + Nlink uint16 + Uid uint32 + Gid uint32 + Rdev uint32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + Lspare int32 + Birthtim Timespec } type Statfs_t struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 06e3a3f4de..e09fe879ba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -829,8 +829,6 @@ type Sigset_t 
struct { Val [32]uint32 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1454,21 +1452,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2315,155 +2298,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - 
BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index cef25e732d..45e693438d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -842,8 +842,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1466,21 +1464,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2328,155 +2311,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS 
= 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index c4369361e3..093afab186 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -818,8 +818,6 @@ type Sigset_t struct { Val [32]uint32 } -const _C__NSIG = 0x41 - type 
SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1444,21 +1442,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2306,155 +2289,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]uint8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - 
BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 76c55e0539..5402721a18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -821,8 +821,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1445,21 +1443,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2307,155 +2290,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - 
BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4302d574ff..19c8351c6d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -823,8 +823,6 @@ type Sigset_t struct { Val [32]uint32 } -const _C__NSIG = 0x80 - type SignalfdSiginfo struct { Signo uint32 
Errno int32 @@ -1450,21 +1448,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2312,155 +2295,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 
0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 7ea742be6a..1431b26557 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -823,8 +823,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x80 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1447,21 +1445,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2309,155 +2292,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf 
- BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 8f2b8ad4ee..181e370066 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -823,8 +823,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x80 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1447,21 +1445,6 @@ type 
TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2309,155 +2292,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - 
BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 865bf57dac..8c037caddd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -823,8 +823,6 @@ type Sigset_t struct { Val [32]uint32 } -const _C__NSIG = 0x80 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1450,21 +1448,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2312,155 +2295,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - 
BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2b68027d5c..97a2f8da24 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -831,8 +831,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1455,21 +1453,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } 
-type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2317,155 +2300,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]uint8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 
- BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 76cd7e643d..8c2cdd77d1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -831,8 +831,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1455,21 +1453,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2317,155 +2300,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]uint8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - 
BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index f99f061557..cd3a2d0b34 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -848,8 +848,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1472,21 +1470,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct 
{ - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2334,155 +2317,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]uint8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - 
BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d9d03ae49a..0d459c7f8d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -844,8 +844,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1469,21 +1467,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2331,155 +2314,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - 
BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index b247fe94ba..71e3b40021 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -826,8 +826,6 @@ type Sigset_t struct { Val [16]uint64 } -const _C__NSIG = 0x41 - type SignalfdSiginfo struct { Signo uint32 Errno int32 @@ -1450,21 +1448,6 @@ type TpacketBlockDesc struct { Hdr [40]byte } -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} 
- -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - type TpacketReq struct { Block_size uint32 Block_nr uint32 @@ -2312,155 +2295,3 @@ type CryptoReportKPP struct { type CryptoReportAcomp struct { Type [64]int8 } - -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID 
= 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index a2268b4f62..2dae0c17a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -57,23 +57,23 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 + Dev uint64 + Mode uint32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 59e1da0a6a..1f0e76c0cc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -58,26 +58,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - _ [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - _ [4]byte - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - _ [4]byte + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 1f1f0f3819..53f2159c7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -59,26 +59,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - _ [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - _ [4]byte - 
Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - _ [4]byte + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 8dca204a9c..43da2c41c5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -58,26 +58,26 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - Mode uint32 - _ [4]byte - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - _ [4]byte - Rdev uint64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Btim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Spare [2]uint32 - _ [4]byte + Dev uint64 + Mode uint32 + Pad_cgo_0 [4]byte + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Pad_cgo_1 [4]byte + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize uint32 + Flags uint32 + Gen uint32 + Spare [2]uint32 + Pad_cgo_2 [4]byte } type Statfs_t [0]byte diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index f482a9fab3..bdc71e241e 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -6,11 +6,7 @@ package windows -import ( - "syscall" - "unicode/utf16" - "unsafe" -) +import "syscall" func Getenv(key string) (value string, found bool) { return syscall.Getenv(key) @@ -28,34 +24,6 @@ func Environ() []string { return syscall.Environ() } -// Returns a default environment associated with the token, rather than the current -// process. If inheritExisting is true, then this environment also inherits the -// environment of the current process. -func (token Token) Environ(inheritExisting bool) (env []string, err error) { - var block *uint16 - err = CreateEnvironmentBlock(&block, token, inheritExisting) - if err != nil { - return nil, err - } - defer DestroyEnvironmentBlock(block) - blockp := uintptr(unsafe.Pointer(block)) - for { - entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:] - for i, v := range entry { - if v == 0 { - entry = entry[:i] - break - } - } - if len(entry) == 0 { - break - } - env = append(env, string(utf16.Decode(entry))) - blockp += 2 * (uintptr(len(entry)) + 1) - } - return env, nil -} - func Unsetenv(key string) error { return syscall.Unsetenv(key) } diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash index 2163843a11..a70b24f398 100644 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -7,13 +7,14 @@ set -e shopt -s nullglob +[[ $# -eq 1 ]] || { echo "Usage: $0 OUTPUT_FILE.go" >&2; exit 1; } winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" [[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } declare -A errors { - echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." 
+ echo "// Code generated by 'go generate'; DO NOT EDIT." echo echo "package windows" echo "import \"syscall\"" @@ -60,4 +61,4 @@ declare -A errors done < "$winerror" echo ")" -} | gofmt > "zerrors_windows.go" +} | gofmt > "$1" diff --git a/vendor/golang.org/x/sys/windows/mkerrors.go b/vendor/golang.org/x/sys/windows/mkerrors.go new file mode 100644 index 0000000000..cbf123ef89 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkerrors.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +//go:generate ./mkerrors.bash zerrors_windows.go diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash deleted file mode 100644 index ab8924e936..0000000000 --- a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" -[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } - -{ - echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." - echo - echo "package windows" - echo "type KNOWNFOLDERID GUID" - echo "var (" - while read -r line; do - [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue - printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ - "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ - $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ - $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) - done < "$knownfolders" - echo ")" -} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 6277057274..fb7db0ef8d 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build generate - package windows //go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 61b49647b9..da06406c44 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -170,20 +170,15 @@ const ( //sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid //sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid //sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid -//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid //sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid //sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid -//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority -//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount -//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority -//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid // The security identifier (SID) structure is a variable-length // structure used to uniquely identify users or groups. type SID struct{} // StringToSid converts a string-format security identifier -// SID into a valid, functional SID. +// sid into a valid, functional sid. func StringToSid(s string) (*SID, error) { var sid *SID p, e := UTF16PtrFromString(s) @@ -198,7 +193,7 @@ func StringToSid(s string) (*SID, error) { return sid.Copy() } -// LookupSID retrieves a security identifier SID for the account +// LookupSID retrieves a security identifier sid for the account // and the name of the domain on which the account was found. // System specify target computer to search. func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { @@ -235,7 +230,7 @@ func LookupSID(system, account string) (sid *SID, domain string, accType uint32, } } -// String converts SID to a string format +// String converts sid to a string format // suitable for display, storage, or transmission. func (sid *SID) String() (string, error) { var s *uint16 @@ -247,12 +242,12 @@ func (sid *SID) String() (string, error) { return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil } -// Len returns the length, in bytes, of a valid security identifier SID. +// Len returns the length, in bytes, of a valid security identifier sid. func (sid *SID) Len() int { return int(GetLengthSid(sid)) } -// Copy creates a duplicate of security identifier SID. +// Copy creates a duplicate of security identifier sid. func (sid *SID) Copy() (*SID, error) { b := make([]byte, sid.Len()) sid2 := (*SID)(unsafe.Pointer(&b[0])) @@ -263,42 +258,8 @@ func (sid *SID) Copy() (*SID, error) { return sid2, nil } -// IdentifierAuthority returns the identifier authority of the SID. 
-func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { - return *getSidIdentifierAuthority(sid) -} - -// SubAuthorityCount returns the number of sub-authorities in the SID. -func (sid *SID) SubAuthorityCount() uint8 { - return *getSidSubAuthorityCount(sid) -} - -// SubAuthority returns the sub-authority of the SID as specified by -// the index, which must be less than sid.SubAuthorityCount(). -func (sid *SID) SubAuthority(idx uint32) uint32 { - if idx >= uint32(sid.SubAuthorityCount()) { - panic("sub-authority index out of range") - } - return *getSidSubAuthority(sid, idx) -} - -// IsValid returns whether the SID has a valid revision and length. -func (sid *SID) IsValid() bool { - return isValidSid(sid) -} - -// Equals compares two SIDs for equality. -func (sid *SID) Equals(sid2 *SID) bool { - return EqualSid(sid, sid2) -} - -// IsWellKnown determines whether the SID matches the well-known sidType. -func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { - return isWellKnownSid(sid, sidType) -} - -// LookupAccount retrieves the name of the account for this SID -// and the name of the first domain on which this SID is found. +// LookupAccount retrieves the name of the account for this sid +// and the name of the first domain on which this sid is found. // System specify target computer to search for. func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { var sys *uint16 @@ -326,7 +287,7 @@ func (sid *SID) LookupAccount(system string) (account, domain string, accType ui } } -// Various types of pre-specified SIDs that can be synthesized and compared at runtime. +// Various types of pre-specified sids that can be synthesized at runtime. type WELL_KNOWN_SID_TYPE uint32 const ( @@ -452,13 +413,13 @@ const ( WinBuiltinDeviceOwnersSid = 119 ) -// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Creates a sid for a well-known predefined alias, generally using the constants of the form // Win*Sid, for the local machine. func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { return CreateWellKnownDomainSid(sidType, nil) } -// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Creates a sid for a well-known predefined alias, generally using the constants of the form // Win*Sid, for the domain specified by the domainSid parameter. 
func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { n := uint32(50) @@ -541,53 +502,6 @@ const ( MaxTokenInfoClass ) -// Group attributes inside of Tokengroups.Groups[i].Attributes -const ( - SE_GROUP_MANDATORY = 0x00000001 - SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 - SE_GROUP_ENABLED = 0x00000004 - SE_GROUP_OWNER = 0x00000008 - SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 - SE_GROUP_INTEGRITY = 0x00000020 - SE_GROUP_INTEGRITY_ENABLED = 0x00000040 - SE_GROUP_LOGON_ID = 0xC0000000 - SE_GROUP_RESOURCE = 0x20000000 - SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED -) - -// Privilege attributes -const ( - SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 - SE_PRIVILEGE_ENABLED = 0x00000002 - SE_PRIVILEGE_REMOVED = 0x00000004 - SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 - SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS -) - -// Token types -const ( - TokenPrimary = 1 - TokenImpersonation = 2 -) - -// Impersonation levels -const ( - SecurityAnonymous = 0 - SecurityIdentification = 1 - SecurityImpersonation = 2 - SecurityDelegation = 3 -) - -type LUID struct { - LowPart uint32 - HighPart int32 -} - -type LUIDAndAttributes struct { - Luid LUID - Attributes uint32 -} - type SIDAndAttributes struct { Sid *SID Attributes uint32 @@ -603,45 +517,13 @@ type Tokenprimarygroup struct { type Tokengroups struct { GroupCount uint32 - Groups [1]SIDAndAttributes // Use AllGroups() for iterating. -} - -// AllGroups returns a slice that can be used to iterate over the groups in g. -func (g *Tokengroups) AllGroups() []SIDAndAttributes { - return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] -} - -type Tokenprivileges struct { - PrivilegeCount uint32 - Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. -} - -// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
-func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { - return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] -} - -type Tokenmandatorylabel struct { - Label SIDAndAttributes -} - -func (tml *Tokenmandatorylabel) Size() uint32 { - return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) + Groups [1]SIDAndAttributes } // Authorization Functions -//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership -//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken -//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken -//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf -//sys RevertToSelf() (err error) = advapi32.RevertToSelf -//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken -//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW -//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges -//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups -//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation -//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation -//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW //sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW @@ -655,9 +537,7 @@ func (tml *Tokenmandatorylabel) Size() uint32 { type Token Handle // OpenCurrentProcessToken opens the access token -// associated with current process. It is a real -// token that needs to be closed, unlike -// GetCurrentProcessToken. +// associated with current process. func OpenCurrentProcessToken() (Token, error) { p, e := GetCurrentProcess() if e != nil { @@ -671,27 +551,6 @@ func OpenCurrentProcessToken() (Token, error) { return t, nil } -// GetCurrentProcessToken returns the access token associated with -// the current process. It is a pseudo token that does not need -// to be closed. -func GetCurrentProcessToken() Token { - return Token(^uintptr(4 - 1)) -} - -// GetCurrentThreadToken return the access token associated with -// the current thread. It is a pseudo token that does not need -// to be closed. 
-func GetCurrentThreadToken() Token { - return Token(^uintptr(5 - 1)) -} - -// GetCurrentThreadEffectiveToken returns the effective access token -// associated with the current thread. It is a pseudo token that does -// not need to be closed. -func GetCurrentThreadEffectiveToken() Token { - return Token(^uintptr(6 - 1)) -} - // Close releases access to access token. func (t Token) Close() error { return CloseHandle(Handle(t)) @@ -763,28 +622,6 @@ func (t Token) GetUserProfileDirectory() (string, error) { } } -// IsElevated returns whether the current token is elevated from a UAC perspective. -func (token Token) IsElevated() bool { - var isElevated uint32 - var outLen uint32 - err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) - if err != nil { - return false - } - return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 -} - -// GetLinkedToken returns the linked token, which may be an elevated UAC token. -func (token Token) GetLinkedToken() (Token, error) { - var linkedToken Token - var outLen uint32 - err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) - if err != nil { - return Token(0), err - } - return linkedToken, nil -} - // GetSystemDirectory retrieves path to current location of the system // directory, which is typically, though not always, C:\Windows\System32. func GetSystemDirectory() (string, error) { @@ -810,45 +647,3 @@ func (t Token) IsMember(sid *SID) (bool, error) { } return b != 0, nil } - -const ( - WTS_CONSOLE_CONNECT = 0x1 - WTS_CONSOLE_DISCONNECT = 0x2 - WTS_REMOTE_CONNECT = 0x3 - WTS_REMOTE_DISCONNECT = 0x4 - WTS_SESSION_LOGON = 0x5 - WTS_SESSION_LOGOFF = 0x6 - WTS_SESSION_LOCK = 0x7 - WTS_SESSION_UNLOCK = 0x8 - WTS_SESSION_REMOTE_CONTROL = 0x9 - WTS_SESSION_CREATE = 0xa - WTS_SESSION_TERMINATE = 0xb -) - -const ( - WTSActive = 0 - WTSConnected = 1 - WTSConnectQuery = 2 - WTSShadow = 3 - WTSDisconnected = 4 - WTSIdle = 5 - WTSListen = 6 - WTSReset = 7 - WTSDown = 8 - WTSInit = 9 -) - -type WTSSESSION_NOTIFICATION struct { - Size uint32 - SessionID uint32 -} - -type WTS_SESSION_INFO struct { - SessionID uint32 - WindowStationName *uint16 - State uint32 -} - -//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken -//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW -//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 03383f1dfd..994b1290ce 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -85,47 +85,21 @@ const ( SERVICE_INACTIVE = 2 SERVICE_STATE_ALL = 3 - SERVICE_QUERY_CONFIG = 1 - SERVICE_CHANGE_CONFIG = 2 - SERVICE_QUERY_STATUS = 4 - SERVICE_ENUMERATE_DEPENDENTS = 8 - SERVICE_START = 16 - SERVICE_STOP = 32 - SERVICE_PAUSE_CONTINUE = 64 - SERVICE_INTERROGATE = 128 - SERVICE_USER_DEFINED_CONTROL = 256 - SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL - + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + 
SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 - - SERVICE_CONFIG_DESCRIPTION = 1 - SERVICE_CONFIG_FAILURE_ACTIONS = 2 - SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 - SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 - SERVICE_CONFIG_SERVICE_SID_INFO = 5 - SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 - SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 - SERVICE_CONFIG_TRIGGER_INFO = 8 - SERVICE_CONFIG_PREFERRED_NODE = 9 - SERVICE_CONFIG_LAUNCH_PROTECTED = 12 - - SERVICE_SID_TYPE_NONE = 0 - SERVICE_SID_TYPE_UNRESTRICTED = 1 - SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 SC_ENUM_PROCESS_INFO = 0 - - SERVICE_NOTIFY_STATUS_CHANGE = 2 - SERVICE_NOTIFY_STOPPED = 0x00000001 - SERVICE_NOTIFY_START_PENDING = 0x00000002 - SERVICE_NOTIFY_STOP_PENDING = 0x00000004 - SERVICE_NOTIFY_RUNNING = 0x00000008 - SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 - SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 - SERVICE_NOTIFY_PAUSED = 0x00000040 - SERVICE_NOTIFY_CREATED = 0x00000080 - SERVICE_NOTIFY_DELETED = 0x00000100 - SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 ) type SERVICE_STATUS struct { @@ -177,16 +151,6 @@ type ENUM_SERVICE_STATUS_PROCESS struct { ServiceStatusProcess SERVICE_STATUS_PROCESS } -type SERVICE_NOTIFY struct { - Version uint32 - NotifyCallback uintptr - Context uintptr - NotificationStatus uint32 - ServiceStatus SERVICE_STATUS_PROCESS - NotificationTriggered uint32 - ServiceNames *uint16 -} - type SERVICE_FAILURE_ACTIONS struct { ResetPeriod uint32 RebootMsg *uint16 @@ -200,19 +164,12 @@ type SC_ACTION struct { Delay uint32 } -type QUERY_SERVICE_LOCK_STATUS struct { - IsLocked uint32 - LockOwner *uint16 - LockDuration uint32 -} - //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW //sys DeleteService(service Handle) (err error) = advapi32.DeleteService //sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW //sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus -//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW //sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService //sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW //sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus @@ -221,5 +178,4 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys ChangeServiceConfig2(service Handle, infoLevel uint32, 
info *byte) (err error) = advapi32.ChangeServiceConfig2W //sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W //sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW -//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx -//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 92ce02bbca..f4d19644e1 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -10,7 +10,6 @@ import ( errorspkg "errors" "sync" "syscall" - "time" "unicode/utf16" "unsafe" ) @@ -56,10 +55,6 @@ const ( FILE_UNICODE_ON_DISK = 0x00000004 FILE_VOLUME_IS_COMPRESSED = 0x00008000 FILE_VOLUME_QUOTAS = 0x00000020 - - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC ) // StringToUTF16 is deprecated. Use UTF16FromString instead. @@ -139,8 +134,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) -//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process -//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) //sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) @@ -172,14 +166,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) -//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW -//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath +//sys OpenProcess(da uint32, 
inheritHandle bool, pid uint32) (handle Handle, err error) //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) //sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW //sys GetCurrentProcess() (pseudoHandle Handle, err error) -//sys GetCurrentThread() (pseudoHandle Handle, err error) //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -194,9 +185,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW //sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW //sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW -//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock -//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock -//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -235,7 +223,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW -//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo @@ -244,8 +232,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW //sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW -//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys Thread32Next(snapshot Handle, 
threadEntry *ThreadEntry32) (err error) //sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) // This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. //sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW @@ -257,18 +243,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx -//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW -//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject -//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject -//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode -//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread -//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass -//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass -//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) -//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) -//sys GetProcessId(process Handle) (id uint32, err error) -//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -290,11 +264,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW //sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW //sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW -//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW -//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString -//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 -//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid -//sys coTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree // syscall interface implementation for other packages @@ -509,10 +478,6 @@ func ComputerName() (name string, err error) { return string(utf16.Decode(b[0:n])), nil } -func DurationSinceBoot() time.Duration { - return time.Duration(getTickCount64()) * time.Millisecond -} - func Ftruncate(fd Handle, length int64) (err error) { curoffset, e := Seek(fd, 0, 1) if e != nil { @@ -1122,7 +1087,7 @@ func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } -func Getpid() (pid int) { return int(GetCurrentProcessId()) } +func Getpid() (pid int) { return int(getCurrentProcessId()) } func FindFirstFile(name *uint16, 
data *Win32finddata) (handle Handle, err error) { // NOTE(rsc): The Win32finddata struct is wrong for the system call: @@ -1250,57 +1215,3 @@ func Readlink(path string, buf []byte) (n int, err error) { return n, nil } - -// GUIDFromString parses a string in the form of -// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. -func GUIDFromString(str string) (GUID, error) { - guid := GUID{} - str16, err := syscall.UTF16PtrFromString(str) - if err != nil { - return guid, err - } - err = clsidFromString(str16, &guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// GenerateGUID creates a new random GUID. -func GenerateGUID() (GUID, error) { - guid := GUID{} - err := coCreateGuid(&guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// String returns the canonical string form of the GUID, -// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". -func (guid GUID) String() string { - var str [100]uint16 - chars := stringFromGUID2(&guid, &str[0], int32(len(str))) - if chars <= 1 { - return "" - } - return string(utf16.Decode(str[:chars-1])) -} - -// KnownFolderPath returns a well-known folder path for the current user, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - return Token(0).KnownFolderPath(folderID, flags) -} - -// KnownFolderPath returns a well-known folder path for the user token, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - var p *uint16 - err := shGetKnownFolderPath(folderID, flags, t, &p) - if err != nil { - return "", err - } - defer coTaskMemFree(unsafe.Pointer(p)) - return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil -} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1cba11ed59..ee27936840 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -4,11 +4,7 @@ package windows -import ( - "net" - "syscall" - "unsafe" -) +import "syscall" const ( // Invented values to support what package os expects. @@ -158,39 +154,9 @@ const ( WAIT_OBJECT_0 = 0x00000000 WAIT_FAILED = 0xFFFFFFFF - // Standard access rights. - DELETE = 0x00010000 - READ_CONTROL = 0x00020000 - SYNCHRONIZE = 0x00100000 - WRITE_DAC = 0x00040000 - WRITE_OWNER = 0x00080000 - - // Access rights for process. - PROCESS_CREATE_PROCESS = 0x0080 - PROCESS_CREATE_THREAD = 0x0002 - PROCESS_DUP_HANDLE = 0x0040 - PROCESS_QUERY_INFORMATION = 0x0400 - PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 - PROCESS_SET_INFORMATION = 0x0200 - PROCESS_SET_QUOTA = 0x0100 - PROCESS_SUSPEND_RESUME = 0x0800 - PROCESS_TERMINATE = 0x0001 - PROCESS_VM_OPERATION = 0x0008 - PROCESS_VM_READ = 0x0010 - PROCESS_VM_WRITE = 0x0020 - - // Access rights for thread. 
- THREAD_DIRECT_IMPERSONATION = 0x0200 - THREAD_GET_CONTEXT = 0x0008 - THREAD_IMPERSONATE = 0x0100 - THREAD_QUERY_INFORMATION = 0x0040 - THREAD_QUERY_LIMITED_INFORMATION = 0x0800 - THREAD_SET_CONTEXT = 0x0010 - THREAD_SET_INFORMATION = 0x0020 - THREAD_SET_LIMITED_INFORMATION = 0x0400 - THREAD_SET_THREAD_TOKEN = 0x0080 - THREAD_SUSPEND_RESUME = 0x0002 - THREAD_TERMINATE = 0x0001 + PROCESS_TERMINATE = 1 + PROCESS_QUERY_INFORMATION = 0x00000400 + SYNCHRONIZE = 0x00100000 FILE_MAP_COPY = 0x01 FILE_MAP_WRITE = 0x02 @@ -430,26 +396,6 @@ const ( SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 ) -const ( - // flags for SetErrorMode - SEM_FAILCRITICALERRORS = 0x0001 - SEM_NOALIGNMENTFAULTEXCEPT = 0x0004 - SEM_NOGPFAULTERRORBOX = 0x0002 - SEM_NOOPENFILEERRORBOX = 0x8000 -) - -const ( - // Priority class. - ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 - BELOW_NORMAL_PRIORITY_CLASS = 0x00004000 - HIGH_PRIORITY_CLASS = 0x00000080 - IDLE_PRIORITY_CLASS = 0x00000040 - NORMAL_PRIORITY_CLASS = 0x00000020 - PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 - PROCESS_MODE_BACKGROUND_END = 0x00200000 - REALTIME_PRIORITY_CLASS = 0x00000100 -) - var ( OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") @@ -659,16 +605,6 @@ type ProcessEntry32 struct { ExeFile [MAX_PATH]uint16 } -type ThreadEntry32 struct { - Size uint32 - Usage uint32 - ThreadID uint32 - OwnerProcessID uint32 - BasePri int32 - DeltaPri int32 - Flags uint32 -} - type Systemtime struct { Year uint16 Month uint16 @@ -1350,41 +1286,6 @@ const ( ComputerNameMax = 8 ) -// For MessageBox() -const ( - MB_OK = 0x00000000 - MB_OKCANCEL = 0x00000001 - MB_ABORTRETRYIGNORE = 0x00000002 - MB_YESNOCANCEL = 0x00000003 - MB_YESNO = 0x00000004 - MB_RETRYCANCEL = 0x00000005 - MB_CANCELTRYCONTINUE = 0x00000006 - MB_ICONHAND = 0x00000010 - MB_ICONQUESTION = 0x00000020 - MB_ICONEXCLAMATION = 0x00000030 - MB_ICONASTERISK = 0x00000040 - MB_USERICON = 0x00000080 - MB_ICONWARNING = MB_ICONEXCLAMATION - MB_ICONERROR = MB_ICONHAND - MB_ICONINFORMATION = MB_ICONASTERISK - MB_ICONSTOP = MB_ICONHAND - MB_DEFBUTTON1 = 0x00000000 - MB_DEFBUTTON2 = 0x00000100 - MB_DEFBUTTON3 = 0x00000200 - MB_DEFBUTTON4 = 0x00000300 - MB_APPLMODAL = 0x00000000 - MB_SYSTEMMODAL = 0x00001000 - MB_TASKMODAL = 0x00002000 - MB_HELP = 0x00004000 - MB_NOFOCUS = 0x00008000 - MB_SETFOREGROUND = 0x00010000 - MB_DEFAULT_DESKTOP_ONLY = 0x00020000 - MB_TOPMOST = 0x00040000 - MB_RIGHT = 0x00080000 - MB_RTLREADING = 0x00100000 - MB_SERVICE_NOTIFICATION = 0x00200000 -) - const ( MOVEFILE_REPLACE_EXISTING = 0x1 MOVEFILE_COPY_ALLOWED = 0x2 @@ -1413,16 +1314,6 @@ type SocketAddress struct { SockaddrLength int32 } -// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. 
-func (addr *SocketAddress) IP() net.IP { - if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { - return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] - } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { - return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] - } - return nil -} - type IpAdapterUnicastAddress struct { Length uint32 Flags uint32 @@ -1548,104 +1439,3 @@ type ConsoleScreenBufferInfo struct { } const UNIX_PATH_MAX = 108 // defined in afunix.h - -const ( - // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags - JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 - JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 - JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 - JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 - JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 - JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 - JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 - JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 - JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 - JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 - JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 - JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 - JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 - JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 - JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 -) - -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - -type IO_COUNTERS struct { - ReadOperationCount uint64 - WriteOperationCount uint64 - OtherOperationCount uint64 - ReadTransferCount uint64 - WriteTransferCount uint64 - OtherTransferCount uint64 -} - -type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { - BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION - IoInfo IO_COUNTERS - ProcessMemoryLimit uintptr - JobMemoryLimit uintptr - PeakProcessMemoryUsed uintptr - PeakJobMemoryUsed uintptr -} - -const ( - // UIRestrictionsClass - JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 - JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 - JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 - JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 - JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 - JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 - JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 - JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 -) - -type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { - UIRestrictionsClass uint32 -} - -const ( - // JobObjectInformationClass - JobObjectAssociateCompletionPortInformation = 7 - JobObjectBasicLimitInformation = 2 - JobObjectBasicUIRestrictions = 4 - JobObjectCpuRateControlInformation = 15 - JobObjectEndOfJobTimeInformation = 6 - JobObjectExtendedLimitInformation = 9 - JobObjectGroupInformation = 11 - JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 - JobObjectNetRateControlInformation = 32 - JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 - JobObjectSecurityLimitInformation = 5 -) - -const ( - KF_FLAG_DEFAULT = 0x00000000 - KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 - KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 - KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 - KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 - KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 - KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 - 
KF_FLAG_CREATE = 0x00008000 - KF_FLAG_DONT_VERIFY = 0x00004000 - KF_FLAG_DONT_UNEXPAND = 0x00002000 - KF_FLAG_NO_ALIAS = 0x00001000 - KF_FLAG_INIT = 0x00000800 - KF_FLAG_DEFAULT_PATH = 0x00000400 - KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 - KF_FLAG_SIMPLE_IDLIST = 0x00000100 - KF_FLAG_ALIAS_ONLY = 0x80000000 -) diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go index f021200352..2b4cea5b94 100644 --- a/vendor/golang.org/x/sys/windows/zerrors_windows.go +++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -1,4 +1,4 @@ -// Code generated by 'mkerrors.bash'; DO NOT EDIT. +// Code generated by 'go generate'; DO NOT EDIT. package windows diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go deleted file mode 100644 index 6048ac679f..0000000000 --- a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go +++ /dev/null @@ -1,149 +0,0 @@ -// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. - -package windows - -type KNOWNFOLDERID GUID - -var ( - FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} - FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} - FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} - FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} - FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} - FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} - FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} - FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} - FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} - FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} - FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} - FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} - FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} - FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} - FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} - FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} - FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} - FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} - FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} - FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 
0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} - FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} - FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} - FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} - FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} - FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} - FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} - FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} - FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 0x49, 0x5d, 0x97}} - FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} - FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} - FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} - FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} - FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} - FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} - FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} - FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} - FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} - FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} - FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} - FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} - FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} - FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} - FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} - FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} - FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} - FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} - FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} - FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, 
[8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} - FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} - FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} - FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} - FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} - FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} - FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} - FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} - FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 0xff}} - FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} - FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} - FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} - FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} - FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} - FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} - FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} - FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} - FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} - FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} - FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} - FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} - FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} - FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} - FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} - FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} - FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} - FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} - FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} - FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 
0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} - FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} - FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} - FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} - FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} - FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} - FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} - FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} - FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} - FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} - FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} - FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} - FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} - FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} - FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} - FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} - FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} - FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} - FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} - FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} - FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} - FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} - FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} - FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} - FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} - FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} - FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} - FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} - FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 
0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} - FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} - FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} - FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} - FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} - FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} - FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} - FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} - FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}} - FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} - FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} - FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} - FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} - FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} - FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} - FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} - FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} - FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} - FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} - FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} - FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} - FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} - FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} - FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} - FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} - FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} - FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} - FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} - FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, 
[8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} - FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} - FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} - FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} - FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} - FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} - FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} - FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} - FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}} - FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} -) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e66ab04949..7060f4341d 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -38,17 +38,14 @@ var ( modadvapi32 = NewLazySystemDLL("advapi32.dll") modkernel32 = NewLazySystemDLL("kernel32.dll") modshell32 = NewLazySystemDLL("shell32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") modmswsock = NewLazySystemDLL("mswsock.dll") modcrypt32 = NewLazySystemDLL("crypt32.dll") - moduser32 = NewLazySystemDLL("user32.dll") - modole32 = NewLazySystemDLL("ole32.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") moddnsapi = NewLazySystemDLL("dnsapi.dll") modiphlpapi = NewLazySystemDLL("iphlpapi.dll") modsecur32 = NewLazySystemDLL("secur32.dll") modnetapi32 = NewLazySystemDLL("netapi32.dll") - modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") @@ -60,7 +57,6 @@ var ( procDeleteService = modadvapi32.NewProc("DeleteService") procStartServiceW = modadvapi32.NewProc("StartServiceW") procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") - procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") procControlService = modadvapi32.NewProc("ControlService") procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") @@ -70,7 +66,6 @@ var ( procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") - procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") procGetLastError = modkernel32.NewProc("GetLastError") procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") @@ -79,7 +74,6 @@ var ( procGetVersion = modkernel32.NewProc("GetVersion") procFormatMessageW = modkernel32.NewProc("FormatMessageW") procExitProcess = modkernel32.NewProc("ExitProcess") - procIsWow64Process = 
modkernel32.NewProc("IsWow64Process") procCreateFileW = modkernel32.NewProc("CreateFileW") procReadFile = modkernel32.NewProc("ReadFile") procWriteFile = modkernel32.NewProc("WriteFile") @@ -113,13 +107,10 @@ var ( procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCreateProcessW = modkernel32.NewProc("CreateProcessW") procOpenProcess = modkernel32.NewProc("OpenProcess") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") procTerminateProcess = modkernel32.NewProc("TerminateProcess") procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") @@ -134,9 +125,6 @@ var ( procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetTickCount64 = modkernel32.NewProc("GetTickCount64") procSetFileTime = modkernel32.NewProc("SetFileTime") procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") @@ -184,8 +172,6 @@ var ( procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procProcess32FirstW = modkernel32.NewProc("Process32FirstW") procProcess32NextW = modkernel32.NewProc("Process32NextW") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") @@ -196,18 +182,6 @@ var ( procSetEvent = modkernel32.NewProc("SetEvent") procResetEvent = modkernel32.NewProc("ResetEvent") procPulseEvent = modkernel32.NewProc("PulseEvent") - procSleepEx = modkernel32.NewProc("SleepEx") - procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") - procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") - procGetProcessId = modkernel32.NewProc("GetProcessId") - procOpenThread = modkernel32.NewProc("OpenThread") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -227,11 +201,6 @@ var ( procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = 
modkernel32.NewProc("SetVolumeMountPointW") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") - procCoCreateGuid = modole32.NewProc("CoCreateGuid") - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procWSAStartup = modws2_32.NewProc("WSAStartup") procWSACleanup = modws2_32.NewProc("WSACleanup") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -280,30 +249,13 @@ var ( procCopySid = modadvapi32.NewProc("CopySid") procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") procFreeSid = modadvapi32.NewProc("FreeSid") procEqualSid = modadvapi32.NewProc("EqualSid") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procIsValidSid = modadvapi32.NewProc("IsValidSid") procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") ) func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { @@ -430,18 +382,6 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { return } -func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { @@ -550,14 +490,6 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize return } -func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} 
- func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { @@ -679,19 +611,7 @@ func ExitProcess(exitcode uint32) { return } -func IsWow64Process(handle Handle, isWow64 *bool) (err error) { - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(isWow64)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) { r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) handle = Handle(r0) if handle == InvalidHandle { @@ -1088,14 +1008,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } -func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { +func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { var _p0 uint32 if inheritHandle { _p0 = 1 } else { _p0 = 0 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) handle = Handle(r0) if handle == 0 { if e1 != 0 { @@ -1107,26 +1027,6 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha return } -func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - func TerminateProcess(handle Handle, exitcode uint32) (err error) { r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) if r1 == 0 { @@ -1176,19 +1076,6 @@ func GetCurrentProcess() (pseudoHandle Handle, err error) { return } -func GetCurrentThread() (pseudoHandle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - pseudoHandle = Handle(r0) - if pseudoHandle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { @@ -1375,42 +1262,6 @@ func 
SetEnvironmentVariable(name *uint16, value *uint16) (err error) { return } -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) - return -} - func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) if r1 == 0 { @@ -1853,7 +1704,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 return } -func GetCurrentProcessId() (pid uint32) { +func getCurrentProcessId() (pid uint32) { r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) pid = uint32(r0) return @@ -1956,30 +1807,6 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { return } -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -2103,156 +1930,6 @@ func PulseEvent(event Handle) (err error) { return } -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) - return -} - -func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), 
uintptr(process), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) - return -} - -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) - id = uint32(r0) - if id == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2480,46 +2157,6 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } -func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if 
ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) - return -} - -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func coTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) - return -} - func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { @@ -3062,12 +2699,6 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s return } -func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) - isWellKnown = r0 != 0 - return -} - func FreeSid(sid *SID) (err error) { r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) if r1 != 0 { @@ -3086,30 +2717,6 @@ func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { return } -func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) - return -} - -func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - count = (*uint8)(unsafe.Pointer(r0)) - return -} - -func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) - subAuthority = (*uint32)(unsafe.Pointer(r0)) - return -} - -func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - isValid = r0 != 0 - return -} - func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { @@ -3122,134 +2729,8 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( return } -func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), 
uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { - var _p0 uint32 - if disableAllPrivileges { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { - var _p0 uint32 - if resetToDefault { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) +func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -3260,8 +2741,8 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint return } -func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, 
impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) +func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -3296,32 +2777,3 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } return } - -func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) - return -} diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go deleted file mode 100644 index f7941701e8..0000000000 --- a/vendor/golang.org/x/text/encoding/charmap/maketables.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" - "unicode/utf8" - - "golang.org/x/text/encoding" - "golang.org/x/text/internal/gen" -) - -const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + - "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + - ` !"#$%&'()*+,-./0123456789:;<=>?` + - `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + - "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" - -var encodings = []struct { - name string - mib string - comment string - varName string - replacement byte - mapping string -}{ - { - "IBM Code Page 037", - "IBM037", - "", - "CodePage037", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm", - }, - { - "IBM Code Page 437", - "PC8CodePage437", - "", - "CodePage437", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", - }, - { - "IBM Code Page 850", - "PC850Multilingual", - "", - "CodePage850", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", - }, - { - "IBM Code Page 852", - "PCp852", - "", - "CodePage852", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", - }, - { - "IBM Code Page 855", - "IBM855", - "", - "CodePage855", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", - }, - { - "Windows Code Page 858", // PC latin1 with Euro - "IBM00858", - "", - "CodePage858", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", - }, - { - "IBM Code Page 860", - "IBM860", - "", - "CodePage860", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm", - }, - { - "IBM Code Page 862", - "PC862LatinHebrew", - "", - "CodePage862", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", - }, - { - "IBM Code Page 863", - "IBM863", - "", - "CodePage863", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm", - }, - { - "IBM Code Page 865", - "IBM865", - "", - "CodePage865", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm", - }, - { - "IBM Code Page 866", - "IBM866", - "", - "CodePage866", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-ibm866.txt", - }, - { - "IBM Code Page 1047", - "IBM1047", - "", - "CodePage1047", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm", - }, - { - "IBM Code Page 1140", - "IBM01140", - "", - "CodePage1140", - 0x3f, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm", - }, - { - "ISO 8859-1", - "ISOLatin1", - "", - "ISO8859_1", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", - }, - { - "ISO 8859-2", - "ISOLatin2", - "", - "ISO8859_2", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", - }, - { - "ISO 8859-3", - "ISOLatin3", - "", - "ISO8859_3", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", - }, - { - "ISO 8859-4", - "ISOLatin4", - "", - "ISO8859_4", - encoding.ASCIISub, - 
"http://encoding.spec.whatwg.org/index-iso-8859-4.txt", - }, - { - "ISO 8859-5", - "ISOLatinCyrillic", - "", - "ISO8859_5", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-5.txt", - }, - { - "ISO 8859-6", - "ISOLatinArabic", - "", - "ISO8859_6,ISO8859_6E,ISO8859_6I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", - }, - { - "ISO 8859-7", - "ISOLatinGreek", - "", - "ISO8859_7", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", - }, - { - "ISO 8859-8", - "ISOLatinHebrew", - "", - "ISO8859_8,ISO8859_8E,ISO8859_8I", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", - }, - { - "ISO 8859-9", - "ISOLatin5", - "", - "ISO8859_9", - encoding.ASCIISub, - "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm", - }, - { - "ISO 8859-10", - "ISOLatin6", - "", - "ISO8859_10", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", - }, - { - "ISO 8859-13", - "ISO885913", - "", - "ISO8859_13", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", - }, - { - "ISO 8859-14", - "ISO885914", - "", - "ISO8859_14", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", - }, - { - "ISO 8859-15", - "ISO885915", - "", - "ISO8859_15", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", - }, - { - "ISO 8859-16", - "ISO885916", - "", - "ISO8859_16", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", - }, - { - "KOI8-R", - "KOI8R", - "", - "KOI8R", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-r.txt", - }, - { - "KOI8-U", - "KOI8U", - "", - "KOI8U", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-koi8-u.txt", - }, - { - "Macintosh", - "Macintosh", - "", - "Macintosh", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-macintosh.txt", - }, - { - "Macintosh Cyrillic", - "MacintoshCyrillic", - "", - "MacintoshCyrillic", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", - }, - { - "Windows 874", - "Windows874", - "", - "Windows874", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-874.txt", - }, - { - "Windows 1250", - "Windows1250", - "", - "Windows1250", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1250.txt", - }, - { - "Windows 1251", - "Windows1251", - "", - "Windows1251", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1251.txt", - }, - { - "Windows 1252", - "Windows1252", - "", - "Windows1252", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1252.txt", - }, - { - "Windows 1253", - "Windows1253", - "", - "Windows1253", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1253.txt", - }, - { - "Windows 1254", - "Windows1254", - "", - "Windows1254", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1254.txt", - }, - { - "Windows 1255", - "Windows1255", - "", - "Windows1255", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1255.txt", - }, - { - "Windows 1256", - "Windows1256", - "", - "Windows1256", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1256.txt", - }, - { - "Windows 1257", - "Windows1257", - "", - "Windows1257", - encoding.ASCIISub, - "http://encoding.spec.whatwg.org/index-windows-1257.txt", - }, - { - "Windows 1258", - "Windows1258", - "", - "Windows1258", - encoding.ASCIISub, - 
"http://encoding.spec.whatwg.org/index-windows-1258.txt", - }, - { - "X-User-Defined", - "XUserDefined", - "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", - "XUserDefined", - encoding.ASCIISub, - ascii + - "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + - "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + - "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + - "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + - "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + - "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + - "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + - "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + - "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + - "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + - "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + - "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + - "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + - "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + - "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + - "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", - }, -} - -func getWHATWG(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 128) - for i := range mapping { - mapping[i] = '\ufffd' - } - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, 0 - if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 128 <= x { - log.Fatalf("code %d is out of range", x) - } - if 0x80 <= y && y < 0xa0 { - // We diverge from the WHATWG spec by mapping control characters - // in the range [0x80, 0xa0) to U+FFFD. - continue - } - mapping[x] = rune(y) - } - return ascii + string(mapping) -} - -func getUCM(url string) string { - res, err := http.Get(url) - if err != nil { - log.Fatalf("%q: Get: %v", url, err) - } - defer res.Body.Close() - - mapping := make([]rune, 256) - for i := range mapping { - mapping[i] = '\ufffd' - } - - charsFound := 0 - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - var c byte - var r rune - if _, err := fmt.Sscanf(s, ` \x%x |0`, &r, &c); err != nil { - continue - } - mapping[c] = r - charsFound++ - } - - if charsFound < 200 { - log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound) - } - - return string(mapping) -} - -func main() { - mibs := map[string]bool{} - all := []string{} - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "charmap") - - printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) } - - printf("import (\n") - printf("\t\"golang.org/x/text/encoding\"\n") - printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n") - printf(")\n\n") - for _, e := range encodings { - varNames := strings.Split(e.varName, ",") - all = append(all, varNames...) 
- varName := varNames[0] - switch { - case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"): - e.mapping = getWHATWG(e.mapping) - case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"): - e.mapping = getUCM(e.mapping) - } - - asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00 - if asciiSuperset { - low = 0x80 - } - lvn := 1 - if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") { - lvn = 3 - } - lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:] - printf("// %s is the %s encoding.\n", varName, e.name) - if e.comment != "" { - printf("//\n// %s\n", e.comment) - } - printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n", - varName, lowerVarName, lowerVarName, e.name) - if mibs[e.mib] { - log.Fatalf("MIB type %q declared multiple times.", e.mib) - } - printf("mib: identifier.%s,\n", e.mib) - printf("asciiSuperset: %t,\n", asciiSuperset) - printf("low: 0x%02x,\n", low) - printf("replacement: 0x%02x,\n", e.replacement) - - printf("decode: [256]utf8Enc{\n") - i, backMapping := 0, map[rune]byte{} - for _, c := range e.mapping { - if _, ok := backMapping[c]; !ok && c != utf8.RuneError { - backMapping[c] = byte(i) - } - var buf [8]byte - n := utf8.EncodeRune(buf[:], c) - if n > 3 { - panic(fmt.Sprintf("rune %q (%U) is too long", c, c)) - } - printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2]) - if i%2 == 1 { - printf("\n") - } - i++ - } - printf("},\n") - - printf("encode: [256]uint32{\n") - encode := make([]uint32, 0, 256) - for c, i := range backMapping { - encode = append(encode, uint32(i)<<24|uint32(c)) - } - sort.Sort(byRune(encode)) - for len(encode) < cap(encode) { - encode = append(encode, encode[len(encode)-1]) - } - for i, enc := range encode { - printf("0x%08x,", enc) - if i%8 == 7 { - printf("\n") - } - } - printf("},\n}\n") - - // Add an estimate of the size of a single Charmap{} struct value, which - // includes two 256 elem arrays of 4 bytes and some extra fields, which - // align to 3 uint64s on 64-bit architectures. - w.Size += 2*4*256 + 3*8 - } - // TODO: add proper line breaking. - printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) -} - -type byRune []uint32 - -func (b byRune) Len() int { return len(b) } -func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } -func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/htmlindex/gen.go b/vendor/golang.org/x/text/encoding/htmlindex/gen.go deleted file mode 100644 index ac6b4a77fd..0000000000 --- a/vendor/golang.org/x/text/encoding/htmlindex/gen.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type group struct { - Encodings []struct { - Labels []string - Name string - } -} - -func main() { - gen.Init() - - r := gen.Open("https://encoding.spec.whatwg.org", "whatwg", "encodings.json") - var groups []group - if err := json.NewDecoder(r).Decode(&groups); err != nil { - log.Fatalf("Error reading encodings.json: %v", err) - } - - w := &bytes.Buffer{} - fmt.Fprintln(w, "type htmlEncoding byte") - fmt.Fprintln(w, "const (") - for i, g := range groups { - for _, e := range g.Encodings { - key := strings.ToLower(e.Name) - name := consts[key] - if name == "" { - log.Fatalf("No const defined for %s.", key) - } - if i == 0 { - fmt.Fprintf(w, "%s htmlEncoding = iota\n", name) - } else { - fmt.Fprintf(w, "%s\n", name) - } - } - } - fmt.Fprintln(w, "numEncodings") - fmt.Fprint(w, ")\n\n") - - fmt.Fprintln(w, "var canonical = [numEncodings]string{") - for _, g := range groups { - for _, e := range g.Encodings { - fmt.Fprintf(w, "%q,\n", strings.ToLower(e.Name)) - } - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintln(w, "var nameMap = map[string]htmlEncoding{") - for _, g := range groups { - for _, e := range g.Encodings { - for _, l := range e.Labels { - key := strings.ToLower(e.Name) - name := consts[key] - fmt.Fprintf(w, "%q: %s,\n", l, name) - } - } - } - fmt.Fprint(w, "}\n\n") - - var tags []string - fmt.Fprintln(w, "var localeMap = []htmlEncoding{") - for _, loc := range locales { - tags = append(tags, loc.tag) - fmt.Fprintf(w, "%s, // %s \n", consts[loc.name], loc.tag) - } - fmt.Fprint(w, "}\n\n") - - fmt.Fprintf(w, "const locales = %q\n", strings.Join(tags, " ")) - - gen.WriteGoFile("tables.go", "htmlindex", w.Bytes()) -} - -// consts maps canonical encoding name to internal constant. -var consts = map[string]string{ - "utf-8": "utf8", - "ibm866": "ibm866", - "iso-8859-2": "iso8859_2", - "iso-8859-3": "iso8859_3", - "iso-8859-4": "iso8859_4", - "iso-8859-5": "iso8859_5", - "iso-8859-6": "iso8859_6", - "iso-8859-7": "iso8859_7", - "iso-8859-8": "iso8859_8", - "iso-8859-8-i": "iso8859_8I", - "iso-8859-10": "iso8859_10", - "iso-8859-13": "iso8859_13", - "iso-8859-14": "iso8859_14", - "iso-8859-15": "iso8859_15", - "iso-8859-16": "iso8859_16", - "koi8-r": "koi8r", - "koi8-u": "koi8u", - "macintosh": "macintosh", - "windows-874": "windows874", - "windows-1250": "windows1250", - "windows-1251": "windows1251", - "windows-1252": "windows1252", - "windows-1253": "windows1253", - "windows-1254": "windows1254", - "windows-1255": "windows1255", - "windows-1256": "windows1256", - "windows-1257": "windows1257", - "windows-1258": "windows1258", - "x-mac-cyrillic": "macintoshCyrillic", - "gbk": "gbk", - "gb18030": "gb18030", - // "hz-gb-2312": "hzgb2312", // Was removed from WhatWG - "big5": "big5", - "euc-jp": "eucjp", - "iso-2022-jp": "iso2022jp", - "shift_jis": "shiftJIS", - "euc-kr": "euckr", - "replacement": "replacement", - "utf-16be": "utf16be", - "utf-16le": "utf16le", - "x-user-defined": "xUserDefined", -} - -// locales is taken from -// https://html.spec.whatwg.org/multipage/syntax.html#encoding-sniffing-algorithm. -var locales = []struct{ tag, name string }{ - // The default value. Explicitly state latin to benefit from the exact - // script option, while still making 1252 the default encoding for languages - // written in Latin script. 
- {"und_Latn", "windows-1252"}, - {"ar", "windows-1256"}, - {"ba", "windows-1251"}, - {"be", "windows-1251"}, - {"bg", "windows-1251"}, - {"cs", "windows-1250"}, - {"el", "iso-8859-7"}, - {"et", "windows-1257"}, - {"fa", "windows-1256"}, - {"he", "windows-1255"}, - {"hr", "windows-1250"}, - {"hu", "iso-8859-2"}, - {"ja", "shift_jis"}, - {"kk", "windows-1251"}, - {"ko", "euc-kr"}, - {"ku", "windows-1254"}, - {"ky", "windows-1251"}, - {"lt", "windows-1257"}, - {"lv", "windows-1257"}, - {"mk", "windows-1251"}, - {"pl", "iso-8859-2"}, - {"ru", "windows-1251"}, - {"sah", "windows-1251"}, - {"sk", "windows-1250"}, - {"sl", "iso-8859-2"}, - {"sr", "windows-1251"}, - {"tg", "windows-1251"}, - {"th", "windows-874"}, - {"tr", "windows-1254"}, - {"tt", "windows-1251"}, - {"uk", "windows-1251"}, - {"vi", "windows-1258"}, - {"zh-hans", "gb18030"}, - {"zh-hant", "big5"}, -} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go deleted file mode 100644 index 26cfef9c6b..0000000000 --- a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "encoding/xml" - "fmt" - "io" - "log" - "strings" - - "golang.org/x/text/internal/gen" -) - -type registry struct { - XMLName xml.Name `xml:"registry"` - Updated string `xml:"updated"` - Registry []struct { - ID string `xml:"id,attr"` - Record []struct { - Name string `xml:"name"` - Xref []struct { - Type string `xml:"type,attr"` - Data string `xml:"data,attr"` - } `xml:"xref"` - Desc struct { - Data string `xml:",innerxml"` - // Any []struct { - // Data string `xml:",chardata"` - // } `xml:",any"` - // Data string `xml:",chardata"` - } `xml:"description,"` - MIB string `xml:"value"` - Alias []string `xml:"alias"` - MIME string `xml:"preferred_alias"` - } `xml:"record"` - } `xml:"registry"` -} - -func main() { - r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") - reg := ®istry{} - if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { - log.Fatalf("Error decoding charset registry: %v", err) - } - if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { - log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) - } - - w := &bytes.Buffer{} - fmt.Fprintf(w, "const (\n") - for _, rec := range reg.Registry[0].Record { - constName := "" - for _, a := range rec.Alias { - if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { - // Some of the constant definitions have comments in them. Strip those. - constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) - } - } - if constName == "" { - switch rec.MIB { - case "2085": - constName = "HZGB2312" // Not listed as alias for some reason. - default: - log.Fatalf("No cs alias defined for %s.", rec.MIB) - } - } - if rec.MIME != "" { - rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) - } - fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) - if len(rec.Desc.Data) > 0 { - fmt.Fprint(w, "// ") - d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) - inElem := true - attr := "" - for { - t, err := d.Token() - if err != nil { - if err != io.EOF { - log.Fatal(err) - } - break - } - switch x := t.(type) { - case xml.CharData: - attr = "" // Don't need attribute info. 
- a := bytes.Split([]byte(x), []byte("\n")) - for i, b := range a { - if b = bytes.TrimSpace(b); len(b) != 0 { - if !inElem && i > 0 { - fmt.Fprint(w, "\n// ") - } - inElem = false - fmt.Fprintf(w, "%s ", string(b)) - } - } - case xml.StartElement: - if x.Name.Local == "xref" { - inElem = true - use := false - for _, a := range x.Attr { - if a.Name.Local == "type" { - use = use || a.Value != "person" - } - if a.Name.Local == "data" && use { - // Patch up URLs to use https. From some links, the - // https version is different from the http one. - s := a.Value - s = strings.Replace(s, "http://", "https://", -1) - s = strings.Replace(s, "/unicode/", "/", -1) - attr = s + " " - } - } - } - case xml.EndElement: - inElem = false - fmt.Fprint(w, attr) - } - } - fmt.Fprint(w, "\n") - } - for _, x := range rec.Xref { - switch x.Type { - case "rfc": - fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) - case "uri": - fmt.Fprintf(w, "// Reference: %s\n", x.Data) - } - } - fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) - fmt.Fprintln(w) - } - fmt.Fprintln(w, ")") - - gen.WriteGoFile("mib.go", "identifier", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/encoding/japanese/maketables.go b/vendor/golang.org/x/text/encoding/japanese/maketables.go deleted file mode 100644 index 023957a672..0000000000 --- a/vendor/golang.org/x/text/encoding/japanese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -// TODO: Emoji extensions? -// https://www.unicode.org/faq/emoji_dingbats.html -// https://www.unicode.org/Public/UNIDATA/EmojiSources.txt - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -type entry struct { - jisCode, table int -} - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package japanese provides Japanese encodings such as EUC-JP and Shift JIS.\n") - fmt.Printf(`package japanese // import "golang.org/x/text/encoding/japanese"` + "\n\n") - - reverse := [65536]entry{} - for i := range reverse { - reverse[i].table = -1 - } - - tables := []struct { - url string - name string - }{ - {"http://encoding.spec.whatwg.org/index-jis0208.txt", "0208"}, - {"http://encoding.spec.whatwg.org/index-jis0212.txt", "0212"}, - } - for i, table := range tables { - res, err := http.Get(table.url) - if err != nil { - log.Fatalf("%q: Get: %v", table.url, err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := 0, uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("%q: could not parse %q", table.url, s) - } - if x < 0 || 120*94 <= x { - log.Fatalf("%q: JIS code %d is out of range", table.url, x) - } - mapping[x] = y - if reverse[y].table == -1 { - reverse[y] = entry{jisCode: x, table: i} - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("%q: scanner error: %v", table.url, err) - } - - fmt.Printf("// jis%sDecode is the decoding table from JIS %s code to Unicode.\n// It is defined at %s\n", - table.name, table.name, table.url) - fmt.Printf("var jis%sDecode = [...]uint16{\n", table.name) - for i, m := range mapping { - if m != 0 { - 
fmt.Printf("\t%d: 0x%04X,\n", i, m) - } - } - fmt.Printf("}\n\n") - } - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v.table == -1 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const (\n") - fmt.Printf("\tjis0208 = 1\n") - fmt.Printf("\tjis0212 = 2\n") - fmt.Printf("\tcodeMask = 0x7f\n") - fmt.Printf("\tcodeShift = 7\n") - fmt.Printf("\ttableShift = 14\n") - fmt.Printf(")\n\n") - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to JIS code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("//\n") - fmt.Printf("// The high two bits of the value record whether the JIS code comes from the\n") - fmt.Printf("// JIS0208 table (high bits == 1) or the JIS0212 table (high bits == 2).\n") - fmt.Printf("// The low 14 bits are two 7-bit unsigned integers j1 and j2 that form the\n") - fmt.Printf("// JIS code (94*j1 + j2) within that table.\n") - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x.table == -1 { - continue - } - fmt.Printf("\t%d - %d: jis%s<<14 | 0x%02X<<7 | 0x%02X,\n", - j, v.low, tables[x.table].name, x.jisCode/94, x.jisCode%94) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/korean/maketables.go b/vendor/golang.org/x/text/encoding/korean/maketables.go deleted file mode 100644 index c84034fb67..0000000000 --- a/vendor/golang.org/x/text/encoding/korean/maketables.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
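// The japanese/maketables.go generator deleted above documents the packed
// uint16 layout of its JIS encode tables: the high two bits select the table
// (1 = JIS 0208, 2 = JIS 0212) and the low 14 bits hold the two 7-bit digits
// j1, j2 of the JIS code 94*j1 + j2. The following is a minimal standalone
// sketch of that packing, separate from the vendored sources, using an
// arbitrary illustrative code point:
package main

import "fmt"

const (
	codeMask   = 0x7f
	codeShift  = 7
	tableShift = 14

	jis0208 = 1
	jis0212 = 2
)

// packJIS stores the table selector in the top two bits and the digits of
// jisCode (= 94*j1 + j2) in the two 7-bit fields below it.
func packJIS(table, jisCode int) uint16 {
	return uint16(table<<tableShift | (jisCode/94)<<codeShift | jisCode%94)
}

// unpackJIS reverses packJIS.
func unpackJIS(v uint16) (table, jisCode int) {
	j1 := int(v>>codeShift) & codeMask
	j2 := int(v) & codeMask
	return int(v >> tableShift), 94*j1 + j2
}

func main() {
	v := packJIS(jis0208, 3027) // 3027 is just an illustrative JIS 0208 code
	fmt.Printf("packed: 0x%04X\n", v)
	fmt.Println(unpackJIS(v)) // 1 3027
}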
- -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package korean provides Korean encodings such as EUC-KR.\n") - fmt.Printf(`package korean // import "golang.org/x/text/encoding/korean"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-euc-kr.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 178*(0xc7-0x81)+(0xfe-0xc7)*94+(0xff-0xa1) <= x { - log.Fatalf("EUC-KR code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := uint16(0), uint16(0) - if x < 178*(0xc7-0x81) { - c0 = uint16(x/178) + 0x81 - c1 = uint16(x % 178) - switch { - case c1 < 1*26: - c1 += 0x41 - case c1 < 2*26: - c1 += 0x47 - default: - c1 += 0x4d - } - } else { - x -= 178 * (0xc7 - 0x81) - c0 = uint16(x/94) + 0xc7 - c1 = uint16(x%94) + 0xa1 - } - reverse[y] = c0<<8 | c1 - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from EUC-KR code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-euc-kr.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to EUC-KR code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. 
-type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go b/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go deleted file mode 100644 index 55016c7862..0000000000 --- a/vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n") - fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n") - - printGB18030() - printGBK() -} - -func printGB18030() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n") - fmt.Printf("var gb18030 = [...][2]uint16{\n") - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint32(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0x10000 && y < 0x10000 { - fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y) - } - } - fmt.Printf("}\n\n") -} - -func printGBK() { - res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint16{} - reverse := [65536]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint16(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*190 <= x { - log.Fatalf("GBK code %d is out of range", x) - } - mapping[x] = y - if reverse[y] == 0 { - c0, c1 := x/190, x%190 - if c1 >= 0x3f { - c1++ - } - reverse[y] = (0x81+c0)<<8 | (0x40 + c1) - } - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n") - fmt.Printf("var decode = [...]uint16{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%04X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. 
- const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go b/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go deleted file mode 100644 index cf7fdb31a5..0000000000 --- a/vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This program generates tables.go: -// go run maketables.go | gofmt > tables.go - -import ( - "bufio" - "fmt" - "log" - "net/http" - "sort" - "strings" -) - -func main() { - fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n") - fmt.Printf("// Package traditionalchinese provides Traditional Chinese encodings such as Big5.\n") - fmt.Printf(`package traditionalchinese // import "golang.org/x/text/encoding/traditionalchinese"` + "\n\n") - - res, err := http.Get("http://encoding.spec.whatwg.org/index-big5.txt") - if err != nil { - log.Fatalf("Get: %v", err) - } - defer res.Body.Close() - - mapping := [65536]uint32{} - reverse := [65536 * 4]uint16{} - - scanner := bufio.NewScanner(res.Body) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - if s == "" || s[0] == '#' { - continue - } - x, y := uint16(0), uint32(0) - if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil { - log.Fatalf("could not parse %q", s) - } - if x < 0 || 126*157 <= x { - log.Fatalf("Big5 code %d is out of range", x) - } - mapping[x] = y - - // The WHATWG spec http://encoding.spec.whatwg.org/#indexes says that - // "The index pointer for code point in index is the first pointer - // corresponding to code point in index", which would normally mean - // that the code below should be guarded by "if reverse[y] == 0", but - // last instead of first seems to match the behavior of - // "iconv -f UTF-8 -t BIG5". 
For example, U+8005 者 occurs twice in - // http://encoding.spec.whatwg.org/index-big5.txt, as index 2148 - // (encoded as "\x8e\xcd") and index 6543 (encoded as "\xaa\xcc") - // and "echo 者 | iconv -f UTF-8 -t BIG5 | xxd" gives "\xaa\xcc". - c0, c1 := x/157, x%157 - if c1 < 0x3f { - c1 += 0x40 - } else { - c1 += 0x62 - } - reverse[y] = (0x81+c0)<<8 | c1 - } - if err := scanner.Err(); err != nil { - log.Fatalf("scanner error: %v", err) - } - - fmt.Printf("// decode is the decoding table from Big5 code to Unicode.\n") - fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-big5.txt\n") - fmt.Printf("var decode = [...]uint32{\n") - for i, v := range mapping { - if v != 0 { - fmt.Printf("\t%d: 0x%08X,\n", i, v) - } - } - fmt.Printf("}\n\n") - - // Any run of at least separation continuous zero entries in the reverse map will - // be a separate encode table. - const separation = 1024 - - intervals := []interval(nil) - low, high := -1, -1 - for i, v := range reverse { - if v == 0 { - continue - } - if low < 0 { - low = i - } else if i-high >= separation { - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - low = i - } - high = i + 1 - } - if high >= 0 { - intervals = append(intervals, interval{low, high}) - } - sort.Sort(byDecreasingLength(intervals)) - - fmt.Printf("const numEncodeTables = %d\n\n", len(intervals)) - fmt.Printf("// encodeX are the encoding tables from Unicode to Big5 code,\n") - fmt.Printf("// sorted by decreasing length.\n") - for i, v := range intervals { - fmt.Printf("// encode%d: %5d entries for runes in [%6d, %6d).\n", i, v.len(), v.low, v.high) - } - fmt.Printf("\n") - - for i, v := range intervals { - fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high) - fmt.Printf("var encode%d = [...]uint16{\n", i) - for j := v.low; j < v.high; j++ { - x := reverse[j] - if x == 0 { - continue - } - fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x) - } - fmt.Printf("}\n\n") - } -} - -// interval is a half-open interval [low, high). -type interval struct { - low, high int -} - -func (i interval) len() int { return i.high - i.low } - -// byDecreasingLength sorts intervals by decreasing length. -type byDecreasingLength []interval - -func (b byDecreasingLength) Len() int { return len(b) } -func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() } -func (b byDecreasingLength) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go deleted file mode 100644 index 0c36a052f6..0000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
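// The four table generators deleted above (japanese, korean,
// simplifiedchinese, traditionalchinese) all split their sparse
// reverse-mapping arrays into dense encode tables with the same trick: walk
// the array, start a new half-open [low, high) interval whenever at least
// `separation` (1024) consecutive unused entries are seen, then sort the
// intervals by decreasing length. A self-contained sketch of just that
// grouping step, over made-up sample data:
package main

import (
	"fmt"
	"sort"
)

// interval is a half-open interval [low, high), as in the generators above.
type interval struct{ low, high int }

func (i interval) len() int { return i.high - i.low }

// groupIntervals returns the index ranges of non-zero entries in reverse,
// starting a new range whenever a gap of at least separation zeros occurs,
// sorted by decreasing length.
func groupIntervals(reverse []uint16, separation int) []interval {
	var intervals []interval
	low, high := -1, -1
	for i, v := range reverse {
		if v == 0 {
			continue
		}
		if low < 0 {
			low = i
		} else if i-high >= separation {
			intervals = append(intervals, interval{low, high})
			low = i
		}
		high = i + 1
	}
	if high >= 0 {
		intervals = append(intervals, interval{low, high})
	}
	sort.Slice(intervals, func(i, j int) bool { return intervals[i].len() > intervals[j].len() })
	return intervals
}

func main() {
	reverse := make([]uint16, 5000)
	for i := 100; i < 300; i++ { // one dense run of 200 entries
		reverse[i] = 1
	}
	for i := 4000; i < 4050; i++ { // a second run, more than 1024 zeros away
		reverse[i] = 1
	}
	fmt.Println(groupIntervals(reverse, 1024)) // [{100 300} {4000 4050}]
}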
- -package main - -import ( - "flag" - "fmt" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -func main() { - gen.Init() - - w := gen.NewCodeWriter() - defer w.WriteGoFile("tables.go", "compact") - - fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) - - b := newBuilder(w) - gen.WriteCLDRVersion(w) - - b.writeCompactIndex() -} - -type builder struct { - w *gen.CodeWriter - data *cldr.CLDR - supp *cldr.SupplementalData -} - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatal(err) - } - b := builder{ - w: w, - data: data, - supp: data.Supplemental(), - } - return &b -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go deleted file mode 100644 index 136cefaf08..0000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_index.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This file generates derivative tables based on the language package itself. - -import ( - "fmt" - "log" - "sort" - "strings" - - "golang.org/x/text/internal/language" -) - -// Compact indices: -// Note -va-X variants only apply to localization variants. -// BCP variants only ever apply to language. -// The only ambiguity between tags is with regions. - -func (b *builder) writeCompactIndex() { - // Collect all language tags for which we have any data in CLDR. - m := map[language.Tag]bool{} - for _, lang := range b.data.Locales() { - // We include all locales unconditionally to be consistent with en_US. - // We want en_US, even though it has no data associated with it. - - // TODO: put any of the languages for which no data exists at the end - // of the index. This allows all components based on ICU to use that - // as the cutoff point. - // if x := data.RawLDML(lang); false || - // x.LocaleDisplayNames != nil || - // x.Characters != nil || - // x.Delimiters != nil || - // x.Measurement != nil || - // x.Dates != nil || - // x.Numbers != nil || - // x.Units != nil || - // x.ListPatterns != nil || - // x.Collations != nil || - // x.Segmentations != nil || - // x.Rbnf != nil || - // x.Annotations != nil || - // x.Metadata != nil { - - // TODO: support POSIX natively, albeit non-standard. - tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) - m[tag] = true - // } - } - - // TODO: plural rules are also defined for the deprecated tags: - // iw mo sh tl - // Consider removing these as compact tags. - - // Include locales for plural rules, which uses a different structure. 
- for _, plurals := range b.supp.Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - m[language.Make(lang)] = true - } - } - } - - var coreTags []language.CompactCoreInfo - var special []string - - for t := range m { - if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { - log.Fatalf("Unexpected extension %v in %v", x, t) - } - if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { - cci, ok := language.GetCompactCore(t) - if !ok { - log.Fatalf("Locale for non-basic language %q", t) - } - coreTags = append(coreTags, cci) - } else { - special = append(special, t.String()) - } - } - - w := b.w - - sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) - sort.Strings(special) - - w.WriteComment(` - NumCompactTags is the number of common tags. The maximum tag is - NumCompactTags-1.`) - w.WriteConst("NumCompactTags", len(m)) - - fmt.Fprintln(w, "const (") - for i, t := range coreTags { - fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) - } - for i, t := range special { - fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) - } - fmt.Fprintln(w, ")") - - w.WriteVar("coreTags", coreTags) - - w.WriteConst("specialTagsStr", strings.Join(special, " ")) -} - -func ident(s string) string { - return strings.Replace(s, "-", "", -1) + "Index" -} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go deleted file mode 100644 index 9543d58323..0000000000 --- a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/language" - "golang.org/x/text/internal/language/compact" - "golang.org/x/text/unicode/cldr" -) - -func main() { - r := gen.OpenCLDRCoreZip() - defer r.Close() - - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - if err != nil { - log.Fatalf("DecodeZip: %v", err) - } - - w := gen.NewCodeWriter() - defer w.WriteGoFile("parents.go", "compact") - - // Create parents table. - type ID uint16 - parents := make([]ID, compact.NumCompactTags) - for _, loc := range data.Locales() { - tag := language.MustParse(loc) - index, ok := compact.FromTag(tag) - if !ok { - continue - } - parentIndex := compact.ID(0) // und - for p := tag.Parent(); p != language.Und; p = p.Parent() { - if x, ok := compact.FromTag(p); ok { - parentIndex = x - break - } - } - parents[index] = ID(parentIndex) - } - - w.WriteComment(` - parents maps a compact index of a tag to the compact index of the parent of - this tag.`) - w.WriteVar("parents", parents) -} diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go deleted file mode 100644 index cdcc7febcb..0000000000 --- a/vendor/golang.org/x/text/internal/language/gen.go +++ /dev/null @@ -1,1520 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Language tag table generator. -// Data read from the web. 
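// gen_parents.go, deleted above, derives each compact tag's parent index by
// walking language.Tag.Parent() upward until it reaches a tag that itself has
// a compact index, falling back to "und" (index 0). Below is a minimal sketch
// of that walk over hypothetical, hand-written maps: parentOf and compactIndex
// stand in for the real CLDR-derived data and are not taken from the vendored
// packages.
package main

import "fmt"

var parentOf = map[string]string{
	"en-GB-oxendict": "en-GB",
	"en-GB":          "en-001",
	"en-001":         "en",
	"en":             "und",
}

var compactIndex = map[string]int{
	"und":    0,
	"en":     5,
	"en-001": 6,
}

// parentIndex follows the parent chain of tag and returns the compact index
// of the nearest ancestor that has one, or 0 (und) if none does.
func parentIndex(tag string) int {
	for p := parentOf[tag]; p != "" && p != "und"; p = parentOf[p] {
		if idx, ok := compactIndex[p]; ok {
			return idx
		}
	}
	return 0
}

func main() {
	fmt.Println(parentIndex("en-GB-oxendict")) // 6: en-001 is the nearest indexed ancestor
	fmt.Println(parentIndex("en"))             // 0: en's parent is und
}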
- -package main - -import ( - "bufio" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/tag" - "golang.org/x/text/unicode/cldr" -) - -var ( - test = flag.Bool("test", - false, - "test existing tables; can be used to compare web data with package data.") - outputFile = flag.String("output", - "tables.go", - "output file for generated tables") -) - -var comment = []string{ - ` -lang holds an alphabetically sorted list of ISO-639 language identifiers. -All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. -For 2-byte language identifiers, the two successive bytes have the following meaning: - - if the first letter of the 2- and 3-letter ISO codes are the same: - the second and third letter of the 3-letter ISO code. - - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. -For 3-byte language identifiers the 4th byte is 0.`, - ` -langNoIndex is a bit vector of all 3-letter language codes that are not used as an index -in lookup tables. The language ids for these language codes are derived directly -from the letters and are not consecutive.`, - ` -altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives -to 2-letter language codes that cannot be derived using the method described above. -Each 3-letter code is followed by its 1-byte langID.`, - ` -altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, - ` -AliasMap maps langIDs to their suggested replacements.`, - ` -script is an alphabetically sorted list of ISO 15924 codes. The index -of the script in the string, divided by 4, is the internal scriptID.`, - ` -isoRegionOffset needs to be added to the index of regionISO to obtain the regionID -for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for -the UN.M49 codes used for groups.)`, - ` -regionISO holds a list of alphabetically sorted 2-letter ISO region codes. -Each 2-letter codes is followed by two bytes with the following meaning: - - [A-Z}{2}: the first letter of the 2-letter code plus these two - letters form the 3-letter ISO code. - - 0, n: index into altRegionISO3.`, - ` -regionTypes defines the status of a region for various standards.`, - ` -m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are -codes indicating collections of regions.`, - ` -m49Index gives indexes into fromM49 based on the three most significant bits -of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in - fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] -for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. -The region code is stored in the 9 lsb of the indexed value.`, - ` -fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, - ` -altRegionISO3 holds a list of 3-letter region codes that cannot be -mapped to 2-letter codes using the default algorithm. This is a short list.`, - ` -altRegionIDs holds a list of regionIDs the positions of which match those -of the 3-letter ISO codes in altRegionISO3.`, - ` -variantNumSpecialized is the number of specialized variants in variants.`, - ` -suppressScript is an index from langID to the dominant script for that language, -if it exists. If a script is given, it should be suppressed from the language tag.`, - ` -likelyLang is a lookup table, indexed by langID, for the most likely -scripts and regions given incomplete information. 
If more entries exist for a -given language, region and script are the index and size respectively -of the list in likelyLangList.`, - ` -likelyLangList holds lists info associated with likelyLang.`, - ` -likelyRegion is a lookup table, indexed by regionID, for the most likely -languages and scripts given incomplete information. If more entries exist -for a given regionID, lang and script are the index and size respectively -of the list in likelyRegionList. -TODO: exclude containers and user-definable regions from the list.`, - ` -likelyRegionList holds lists info associated with likelyRegion.`, - ` -likelyScript is a lookup table, indexed by scriptID, for the most likely -languages and regions given a script.`, - ` -nRegionGroups is the number of region groups.`, - ` -regionInclusion maps region identifiers to sets of regions in regionInclusionBits, -where each set holds all groupings that are directly connected in a region -containment graph.`, - ` -regionInclusionBits is an array of bit vectors where every vector represents -a set of region groupings. These sets are used to compute the distance -between two regions for the purpose of language matching.`, - ` -regionInclusionNext marks, for each entry in regionInclusionBits, the set of -all groups that are reachable from the groups set in the respective entry.`, -} - -// TODO: consider changing some of these structures to tries. This can reduce -// memory, but may increase the need for memory allocations. This could be -// mitigated if we can piggyback on language tags for common cases. - -func failOnError(e error) { - if e != nil { - log.Panic(e) - } -} - -type setType int - -const ( - Indexed setType = 1 + iota // all elements must be of same size - Linear -) - -type stringSet struct { - s []string - sorted, frozen bool - - // We often need to update values after the creation of an index is completed. - // We include a convenience map for keeping track of this. - update map[string]string - typ setType // used for checking. -} - -func (ss *stringSet) clone() stringSet { - c := *ss - c.s = append([]string(nil), c.s...) - return c -} - -func (ss *stringSet) setType(t setType) { - if ss.typ != t && ss.typ != 0 { - log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) - } -} - -// parse parses a whitespace-separated string and initializes ss with its -// components. 
-func (ss *stringSet) parse(s string) { - scan := bufio.NewScanner(strings.NewReader(s)) - scan.Split(bufio.ScanWords) - for scan.Scan() { - ss.add(scan.Text()) - } -} - -func (ss *stringSet) assertChangeable() { - if ss.frozen { - log.Panic("attempt to modify a frozen stringSet") - } -} - -func (ss *stringSet) add(s string) { - ss.assertChangeable() - ss.s = append(ss.s, s) - ss.sorted = ss.frozen -} - -func (ss *stringSet) freeze() { - ss.compact() - ss.frozen = true -} - -func (ss *stringSet) compact() { - if ss.sorted { - return - } - a := ss.s - sort.Strings(a) - k := 0 - for i := 1; i < len(a); i++ { - if a[k] != a[i] { - a[k+1] = a[i] - k++ - } - } - ss.s = a[:k+1] - ss.sorted = ss.frozen -} - -type funcSorter struct { - fn func(a, b string) bool - sort.StringSlice -} - -func (s funcSorter) Less(i, j int) bool { - return s.fn(s.StringSlice[i], s.StringSlice[j]) -} - -func (ss *stringSet) sortFunc(f func(a, b string) bool) { - ss.compact() - sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) -} - -func (ss *stringSet) remove(s string) { - ss.assertChangeable() - if i, ok := ss.find(s); ok { - copy(ss.s[i:], ss.s[i+1:]) - ss.s = ss.s[:len(ss.s)-1] - } -} - -func (ss *stringSet) replace(ol, nu string) { - ss.s[ss.index(ol)] = nu - ss.sorted = ss.frozen -} - -func (ss *stringSet) index(s string) int { - ss.setType(Indexed) - i, ok := ss.find(s) - if !ok { - if i < len(ss.s) { - log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) - } - log.Panicf("find: item %q is not in list", s) - - } - return i -} - -func (ss *stringSet) find(s string) (int, bool) { - ss.compact() - i := sort.SearchStrings(ss.s, s) - return i, i != len(ss.s) && ss.s[i] == s -} - -func (ss *stringSet) slice() []string { - ss.compact() - return ss.s -} - -func (ss *stringSet) updateLater(v, key string) { - if ss.update == nil { - ss.update = map[string]string{} - } - ss.update[v] = key -} - -// join joins the string and ensures that all entries are of the same length. -func (ss *stringSet) join() string { - ss.setType(Indexed) - n := len(ss.s[0]) - for _, s := range ss.s { - if len(s) != n { - log.Panicf("join: not all entries are of the same length: %q", s) - } - } - ss.s = append(ss.s, strings.Repeat("\xff", n)) - return strings.Join(ss.s, "") -} - -// ianaEntry holds information for an entry in the IANA Language Subtag Repository. -// All types use the same entry. -// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various -// fields. -type ianaEntry struct { - typ string - description []string - scope string - added string - preferred string - deprecated string - suppressScript string - macro string - prefix []string -} - -type builder struct { - w *gen.CodeWriter - hw io.Writer // MultiWriter for w and w.Hash - data *cldr.CLDR - supp *cldr.SupplementalData - - // indices - locale stringSet // common locales - lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data - langNoIndex stringSet // 3-letter ISO codes with no associated data - script stringSet // 4-letter ISO codes - region stringSet // 2-letter ISO or 3-digit UN M49 codes - variant stringSet // 4-8-alphanumeric variant code. - - // Region codes that are groups with their corresponding group IDs. 
- groups map[int]index - - // langInfo - registry map[string]*ianaEntry -} - -type index uint - -func newBuilder(w *gen.CodeWriter) *builder { - r := gen.OpenCLDRCoreZip() - defer r.Close() - d := &cldr.Decoder{} - data, err := d.DecodeZip(r) - failOnError(err) - b := builder{ - w: w, - hw: io.MultiWriter(w, w.Hash), - data: data, - supp: data.Supplemental(), - } - b.parseRegistry() - return &b -} - -func (b *builder) parseRegistry() { - r := gen.OpenIANAFile("assignments/language-subtag-registry") - defer r.Close() - b.registry = make(map[string]*ianaEntry) - - scan := bufio.NewScanner(r) - scan.Split(bufio.ScanWords) - var record *ianaEntry - for more := scan.Scan(); more; { - key := scan.Text() - more = scan.Scan() - value := scan.Text() - switch key { - case "Type:": - record = &ianaEntry{typ: value} - case "Subtag:", "Tag:": - if s := strings.SplitN(value, "..", 2); len(s) > 1 { - for a := s[0]; a <= s[1]; a = inc(a) { - b.addToRegistry(a, record) - } - } else { - b.addToRegistry(value, record) - } - case "Suppress-Script:": - record.suppressScript = value - case "Added:": - record.added = value - case "Deprecated:": - record.deprecated = value - case "Macrolanguage:": - record.macro = value - case "Preferred-Value:": - record.preferred = value - case "Prefix:": - record.prefix = append(record.prefix, value) - case "Scope:": - record.scope = value - case "Description:": - buf := []byte(value) - for more = scan.Scan(); more; more = scan.Scan() { - b := scan.Bytes() - if b[0] == '%' || b[len(b)-1] == ':' { - break - } - buf = append(buf, ' ') - buf = append(buf, b...) - } - record.description = append(record.description, string(buf)) - continue - default: - continue - } - more = scan.Scan() - } - if scan.Err() != nil { - log.Panic(scan.Err()) - } -} - -func (b *builder) addToRegistry(key string, entry *ianaEntry) { - if info, ok := b.registry[key]; ok { - if info.typ != "language" || entry.typ != "extlang" { - log.Fatalf("parseRegistry: tag %q already exists", key) - } - } else { - b.registry[key] = entry - } -} - -var commentIndex = make(map[string]string) - -func init() { - for _, s := range comment { - key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) - commentIndex[key] = s - } -} - -func (b *builder) comment(name string) { - if s := commentIndex[name]; len(s) > 0 { - b.w.WriteComment(s) - } else { - fmt.Fprintln(b.w) - } -} - -func (b *builder) pf(f string, x ...interface{}) { - fmt.Fprintf(b.hw, f, x...) - fmt.Fprint(b.hw, "\n") -} - -func (b *builder) p(x ...interface{}) { - fmt.Fprintln(b.hw, x...) -} - -func (b *builder) addSize(s int) { - b.w.Size += s - b.pf("// Size: %d bytes", s) -} - -func (b *builder) writeConst(name string, x interface{}) { - b.comment(name) - b.w.WriteConst(name, x) -} - -// writeConsts computes f(v) for all v in values and writes the results -// as constants named _v to a single constant block. -func (b *builder) writeConsts(f func(string) int, values ...string) { - b.pf("const (") - for _, v := range values { - b.pf("\t_%s = %v", v, f(v)) - } - b.pf(")") -} - -// writeType writes the type of the given value, which must be a struct. 
-func (b *builder) writeType(value interface{}) { - b.comment(reflect.TypeOf(value).Name()) - b.w.WriteType(value) -} - -func (b *builder) writeSlice(name string, ss interface{}) { - b.writeSliceAddSize(name, 0, ss) -} - -func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { - b.comment(name) - b.w.Size += extraSize - v := reflect.ValueOf(ss) - t := v.Type().Elem() - b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) - - fmt.Fprintf(b.w, "var %s = ", name) - b.w.WriteArray(ss) - b.p() -} - -type FromTo struct { - From, To uint16 -} - -func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { - ss.sortFunc(func(a, b string) bool { - return index(a) < index(b) - }) - m := []FromTo{} - for _, s := range ss.s { - m = append(m, FromTo{index(s), index(ss.update[s])}) - } - b.writeSlice(name, m) -} - -const base = 'z' - 'a' + 1 - -func strToInt(s string) uint { - v := uint(0) - for i := 0; i < len(s); i++ { - v *= base - v += uint(s[i] - 'a') - } - return v -} - -// converts the given integer to the original ASCII string passed to strToInt. -// len(s) must match the number of characters obtained. -func intToStr(v uint, s []byte) { - for i := len(s) - 1; i >= 0; i-- { - s[i] = byte(v%base) + 'a' - v /= base - } -} - -func (b *builder) writeBitVector(name string, ss []string) { - vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) - for _, s := range ss { - v := strToInt(s) - vec[v/8] |= 1 << (v % 8) - } - b.writeSlice(name, vec) -} - -// TODO: convert this type into a list or two-stage trie. -func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size())) - for _, k := range m { - sz += len(k) - } - b.addSize(sz) - keys := []string{} - b.pf(`var %s = map[string]uint16{`, name) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - b.pf("\t%q: %v,", k, f(m[k])) - } - b.p("}") -} - -func (b *builder) writeMap(name string, m interface{}) { - b.comment(name) - v := reflect.ValueOf(m) - sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) - b.addSize(sz) - f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { - return strings.IndexRune("{}, ", r) != -1 - }) - sort.Strings(f[1:]) - b.pf(`var %s = %s{`, name, f[0]) - for _, kv := range f[1:] { - b.pf("\t%s,", kv) - } - b.p("}") -} - -func (b *builder) langIndex(s string) uint16 { - if s == "und" { - return 0 - } - if i, ok := b.lang.find(s); ok { - return uint16(i) - } - return uint16(strToInt(s)) + uint16(len(b.lang.s)) -} - -// inc advances the string to its lexicographical successor. -func inc(s string) string { - const maxTagLength = 4 - var buf [maxTagLength]byte - intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) - for i := 0; i < len(s); i++ { - if s[i] <= 'Z' { - buf[i] -= 'a' - 'A' - } - } - return string(buf[:len(s)]) -} - -func (b *builder) parseIndices() { - meta := b.supp.Metadata - - for k, v := range b.registry { - var ss *stringSet - switch v.typ { - case "language": - if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { - b.lang.add(k) - continue - } else { - ss = &b.langNoIndex - } - case "region": - ss = &b.region - case "script": - ss = &b.script - case "variant": - ss = &b.variant - default: - continue - } - ss.add(k) - } - // Include any language for which there is data. 
- for _, lang := range b.data.Locales() { - if x := b.data.RawLDML(lang); false || - x.LocaleDisplayNames != nil || - x.Characters != nil || - x.Delimiters != nil || - x.Measurement != nil || - x.Dates != nil || - x.Numbers != nil || - x.Units != nil || - x.ListPatterns != nil || - x.Collations != nil || - x.Segmentations != nil || - x.Rbnf != nil || - x.Annotations != nil || - x.Metadata != nil { - - from := strings.Split(lang, "_") - if lang := from[0]; lang != "root" { - b.lang.add(lang) - } - } - } - // Include locales for plural rules, which uses a different structure. - for _, plurals := range b.data.Supplemental().Plurals { - for _, rules := range plurals.PluralRules { - for _, lang := range strings.Split(rules.Locales, " ") { - if lang = strings.Split(lang, "_")[0]; lang != "root" { - b.lang.add(lang) - } - } - } - } - // Include languages in likely subtags. - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - b.lang.add(from[0]) - } - // Include ISO-639 alpha-3 bibliographic entries. - for _, a := range meta.Alias.LanguageAlias { - if a.Reason == "bibliographic" { - b.langNoIndex.add(a.Type) - } - } - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 { - b.region.add(reg.Type) - } - } - - for _, s := range b.lang.s { - if len(s) == 3 { - b.langNoIndex.remove(s) - } - } - b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) - b.writeConst("NumScripts", len(b.script.slice())) - b.writeConst("NumRegions", len(b.region.slice())) - - // Add dummy codes at the start of each list to represent "unspecified". - b.lang.add("---") - b.script.add("----") - b.region.add("---") - - // common locales - b.locale.parse(meta.DefaultContent.Locales) -} - -// TODO: region inclusion data will probably not be use used in future matchers. - -func (b *builder) computeRegionGroups() { - b.groups = make(map[int]index) - - // Create group indices. - for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. - b.groups[i] = index(len(b.groups)) - } - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - if _, ok := b.groups[group]; !ok { - b.groups[group] = index(len(b.groups)) - } - } - if len(b.groups) > 64 { - log.Fatalf("only 64 groups supported, found %d", len(b.groups)) - } - b.writeConst("nRegionGroups", len(b.groups)) -} - -var langConsts = []string{ - "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", - "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", - "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", - "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", - "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", - "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", - - // constants for grandfathered tags (if not already defined) - "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", - "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", -} - -// writeLanguage generates all tables needed for language canonicalization. 
-func (b *builder) writeLanguage() { - meta := b.supp.Metadata - - b.writeConst("nonCanonicalUnd", b.lang.index("und")) - b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) - b.writeConst("langPrivateStart", b.langIndex("qaa")) - b.writeConst("langPrivateEnd", b.langIndex("qtz")) - - // Get language codes that need to be mapped (overlong 3-letter codes, - // deprecated 2-letter codes, legacy and grandfathered tags.) - langAliasMap := stringSet{} - aliasTypeMap := map[string]AliasType{} - - // altLangISO3 get the alternative ISO3 names that need to be mapped. - altLangISO3 := stringSet{} - // Add dummy start to avoid the use of index 0. - altLangISO3.add("---") - altLangISO3.updateLater("---", "aa") - - lang := b.lang.clone() - for _, a := range meta.Alias.LanguageAlias { - if a.Replacement == "" { - a.Replacement = "und" - } - // TODO: support mapping to tags - repl := strings.SplitN(a.Replacement, "_", 2)[0] - if a.Reason == "overlong" { - if len(a.Replacement) == 2 && len(a.Type) == 3 { - lang.updateLater(a.Replacement, a.Type) - } - } else if len(a.Type) <= 3 { - switch a.Reason { - case "macrolanguage": - aliasTypeMap[a.Type] = Macro - case "deprecated": - // handled elsewhere - continue - case "bibliographic", "legacy": - if a.Type == "no" { - continue - } - aliasTypeMap[a.Type] = Legacy - default: - log.Fatalf("new %s alias: %s", a.Reason, a.Type) - } - langAliasMap.add(a.Type) - langAliasMap.updateLater(a.Type, repl) - } - } - // Manually add the mapping of "nb" (Norwegian) to its macro language. - // This can be removed if CLDR adopts this change. - langAliasMap.add("nb") - langAliasMap.updateLater("nb", "no") - aliasTypeMap["nb"] = Macro - - for k, v := range b.registry { - // Also add deprecated values for 3-letter ISO codes, which CLDR omits. - if v.typ == "language" && v.deprecated != "" && v.preferred != "" { - langAliasMap.add(k) - langAliasMap.updateLater(k, v.preferred) - aliasTypeMap[k] = Deprecated - } - } - // Fix CLDR mappings. - lang.updateLater("tl", "tgl") - lang.updateLater("sh", "hbs") - lang.updateLater("mo", "mol") - lang.updateLater("no", "nor") - lang.updateLater("tw", "twi") - lang.updateLater("nb", "nob") - lang.updateLater("ak", "aka") - lang.updateLater("bh", "bih") - - // Ensure that each 2-letter code is matched with a 3-letter code. - for _, v := range lang.s[1:] { - s, ok := lang.update[v] - if !ok { - if s, ok = lang.update[langAliasMap.update[v]]; !ok { - continue - } - lang.update[v] = s - } - if v[0] != s[0] { - altLangISO3.add(s) - altLangISO3.updateLater(s, v) - } - } - - // Complete canonicalized language tags. - lang.freeze() - for i, v := range lang.s { - // We can avoid these manual entries by using the IANA registry directly. - // Seems easier to update the list manually, as changes are rare. - // The panic in this loop will trigger if we miss an entry. - add := "" - if s, ok := lang.update[v]; ok { - if s[0] == v[0] { - add = s[1:] - } else { - add = string([]byte{0, byte(altLangISO3.index(s))}) - } - } else if len(v) == 3 { - add = "\x00" - } else { - log.Panicf("no data for long form of %q", v) - } - lang.s[i] += add - } - b.writeConst("lang", tag.Index(lang.join())) - - b.writeConst("langNoIndexOffset", len(b.lang.s)) - - // space of all valid 3-letter language identifiers. 
- b.writeBitVector("langNoIndex", b.langNoIndex.slice()) - - altLangIndex := []uint16{} - for i, s := range altLangISO3.slice() { - altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) - if i > 0 { - idx := b.lang.index(altLangISO3.update[s]) - altLangIndex = append(altLangIndex, uint16(idx)) - } - } - b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) - b.writeSlice("altLangIndex", altLangIndex) - - b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) - types := make([]AliasType, len(langAliasMap.s)) - for i, s := range langAliasMap.s { - types[i] = aliasTypeMap[s] - } - b.writeSlice("AliasTypes", types) -} - -var scriptConsts = []string{ - "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", - "Zzzz", -} - -func (b *builder) writeScript() { - b.writeConsts(b.script.index, scriptConsts...) - b.writeConst("script", tag.Index(b.script.join())) - - supp := make([]uint8, len(b.lang.slice())) - for i, v := range b.lang.slice()[1:] { - if sc := b.registry[v].suppressScript; sc != "" { - supp[i+1] = uint8(b.script.index(sc)) - } - } - b.writeSlice("suppressScript", supp) - - // There is only one deprecated script in CLDR. This value is hard-coded. - // We check here if the code must be updated. - for _, a := range b.supp.Metadata.Alias.ScriptAlias { - if a.Type != "Qaai" { - log.Panicf("unexpected deprecated stript %q", a.Type) - } - } -} - -func parseM49(s string) int16 { - if len(s) == 0 { - return 0 - } - v, err := strconv.ParseUint(s, 10, 10) - failOnError(err) - return int16(v) -} - -var regionConsts = []string{ - "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", - "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. -} - -func (b *builder) writeRegion() { - b.writeConsts(b.region.index, regionConsts...) - - isoOffset := b.region.index("AA") - m49map := make([]int16, len(b.region.slice())) - fromM49map := make(map[int16]int) - altRegionISO3 := "" - altRegionIDs := []uint16{} - - b.writeConst("isoRegionOffset", isoOffset) - - // 2-letter region lookup and mapping to numeric codes. - regionISO := b.region.clone() - regionISO.s = regionISO.s[isoOffset:] - regionISO.sorted = false - - regionTypes := make([]byte, len(b.region.s)) - - // Is the region valid BCP 47? - for s, e := range b.registry { - if len(s) == 2 && s == strings.ToUpper(s) { - i := b.region.index(s) - for _, d := range e.description { - if strings.Contains(d, "Private use") { - regionTypes[i] = iso3166UserAssigned - } - } - regionTypes[i] |= bcp47Region - } - } - - // Is the region a valid ccTLD? 
- r := gen.OpenIANAFile("domains/root/db") - defer r.Close() - - buf, err := ioutil.ReadAll(r) - failOnError(err) - re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) - for _, m := range re.FindAllSubmatch(buf, -1) { - i := b.region.index(strings.ToUpper(string(m[1]))) - regionTypes[i] |= ccTLD - } - - b.writeSlice("regionTypes", regionTypes) - - iso3Set := make(map[string]int) - update := func(iso2, iso3 string) { - i := regionISO.index(iso2) - if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { - regionISO.s[i] += iso3[1:] - iso3Set[iso3] = -1 - } else { - if ok && j >= 0 { - regionISO.s[i] += string([]byte{0, byte(j)}) - } else { - iso3Set[iso3] = len(altRegionISO3) - regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) - altRegionISO3 += iso3 - altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - i := regionISO.index(tc.Type) + isoOffset - if d := m49map[i]; d != 0 { - log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) - } - m49 := parseM49(tc.Numeric) - m49map[i] = m49 - if r := fromM49map[m49]; r == 0 { - fromM49map[m49] = i - } else if r != i { - dep := b.registry[regionISO.s[r-isoOffset]].deprecated - if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { - fromM49map[m49] = i - } - } - } - for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { - if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { - from := parseM49(ta.Type) - if r := fromM49map[from]; r == 0 { - fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset - } - } - } - for _, tc := range b.supp.CodeMappings.TerritoryCodes { - if len(tc.Alpha3) == 3 { - update(tc.Type, tc.Alpha3) - } - } - // This entries are not included in territoryCodes. Mostly 3-letter variants - // of deleted codes and an entry for QU. - for _, m := range []struct{ iso2, iso3 string }{ - {"CT", "CTE"}, - {"DY", "DHY"}, - {"HV", "HVO"}, - {"JT", "JTN"}, - {"MI", "MID"}, - {"NH", "NHB"}, - {"NQ", "ATN"}, - {"PC", "PCI"}, - {"PU", "PUS"}, - {"PZ", "PCZ"}, - {"RH", "RHO"}, - {"VD", "VDR"}, - {"WK", "WAK"}, - // These three-letter codes are used for others as well. - {"FQ", "ATF"}, - } { - update(m.iso2, m.iso3) - } - for i, s := range regionISO.s { - if len(s) != 4 { - regionISO.s[i] = s + " " - } - } - b.writeConst("regionISO", tag.Index(regionISO.join())) - b.writeConst("altRegionISO3", altRegionISO3) - b.writeSlice("altRegionIDs", altRegionIDs) - - // Create list of deprecated regions. - // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only - // Transitionally-reserved mapping not included. - regionOldMap := stringSet{} - // Include regions in territoryAlias (not all are in the IANA registry!) - for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { - if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { - regionOldMap.add(reg.Type) - regionOldMap.updateLater(reg.Type, reg.Replacement) - i, _ := regionISO.find(reg.Type) - j, _ := regionISO.find(reg.Replacement) - if k := m49map[i+isoOffset]; k == 0 { - m49map[i+isoOffset] = m49map[j+isoOffset] - } - } - } - b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { - return uint16(b.region.index(s)) - }) - // 3-digit region lookup, groupings. 
- for i := 1; i < isoOffset; i++ {
- m := parseM49(b.region.s[i])
- m49map[i] = m
- fromM49map[m] = i
- }
- b.writeSlice("m49", m49map)
-
- const (
- searchBits = 7
- regionBits = 9
- )
- if len(m49map) >= 1<<regionBits {
- log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits)
- }
- m49Index := [9]int16{}
- fromM49 := []uint16{}
- m49 := []int{}
- for k, _ := range fromM49map {
- m49 = append(m49, int(k))
- }
- sort.Ints(m49)
- for _, k := range m49[1:] {
- val := (k & (1<<searchBits - 1)) << regionBits
- fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)]))
- m49Index[1:][k>>searchBits] = int16(len(fromM49))
- }
- b.writeSlice("m49Index", m49Index)
- b.writeSlice("fromM49", fromM49)
-}
-
-const (
- // TODO: put these lists in regionTypes as user data? Could be used for
- // various optimizations and refinements and could be exposed in the API.
- iso3166Except = "AC CP DG EA EU FX IC SU TA UK"
- iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions.
- // DY and RH are actually not deleted, but indeterminately reserved.
- iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD"
-)
-
-const (
- iso3166UserAssigned = 1 << iota
- ccTLD
- bcp47Region
-)
-
-func find(list []string, s string) int {
- for i, t := range list {
- if t == s {
- return i
- }
- }
- return -1
-}
-
-// writeVariants generates per-variant information and creates a map from variant
-// name to index value. We assign index values such that sorting multiple
-// variants by index value will result in the correct order.
-// There are two types of variants: specialized and general. Specialized variants
-// are only applicable to certain language or language-script pairs. Generalized
-// variants apply to any language. Generalized variants always sort after
-// specialized variants. We will therefore always assign a higher index value
-// to a generalized variant than any other variant. Generalized variants are
-// sorted alphabetically among themselves.
-// Specialized variants may also sort after other specialized variants. Such
-// variants will be ordered after any of the variants they may follow.
-// We assume that if a variant x is followed by a variant y, then for any prefix
-// p of x, p-x is a prefix of y. This allows us to order tags based on the
-// maximum of the length of any of its prefixes.
-// TODO: it is possible to define a set of Prefix values on variants such that
-// a total order cannot be defined to the point that this algorithm breaks.
-// In other words, we cannot guarantee the same order of variants for the
-// future using the same algorithm or for non-compliant combinations of
-// variants. For this reason, consider using simple alphabetic sorting
-// of variants and ignore Prefix restrictions altogether.
-func (b *builder) writeVariant() {
- generalized := stringSet{}
- specialized := stringSet{}
- specializedExtend := stringSet{}
- // Collate the variants by type and check assumptions.
- for _, v := range b.variant.slice() {
- e := b.registry[v]
- if len(e.prefix) == 0 {
- generalized.add(v)
- continue
- }
- c := strings.Split(e.prefix[0], "-")
- hasScriptOrRegion := false
- if len(c) > 1 {
- _, hasScriptOrRegion = b.script.find(c[1])
- if !hasScriptOrRegion {
- _, hasScriptOrRegion = b.region.find(c[1])
-
- }
- }
- if len(c) == 1 || len(c) == 2 && hasScriptOrRegion {
- // Variant is preceded by a language.
- specialized.add(v)
- continue
- }
- // Variant is preceded by another variant.
- specializedExtend.add(v)
- prefix := c[0] + "-"
- if hasScriptOrRegion {
- prefix += c[1]
- }
- for _, p := range e.prefix {
- // Verify that the prefix minus the last element is a prefix of the
- // predecessor element. 
- i := strings.LastIndex(p, "-") - pred := b.registry[p[i+1:]] - if find(pred.prefix, p[:i]) < 0 { - log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) - } - // The sorting used below does not work in the general case. It works - // if we assume that variants that may be followed by others only have - // prefixes of the same length. Verify this. - count := strings.Count(p[:i], "-") - for _, q := range pred.prefix { - if c := strings.Count(q, "-"); c != count { - log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) - } - } - if !strings.HasPrefix(p, prefix) { - log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) - } - } - } - - // Sort extended variants. - a := specializedExtend.s - less := func(v, w string) bool { - // Sort by the maximum number of elements. - maxCount := func(s string) (max int) { - for _, p := range b.registry[s].prefix { - if c := strings.Count(p, "-"); c > max { - max = c - } - } - return - } - if cv, cw := maxCount(v), maxCount(w); cv != cw { - return cv < cw - } - // Sort by name as tie breaker. - return v < w - } - sort.Sort(funcSorter{less, sort.StringSlice(a)}) - specializedExtend.frozen = true - - // Create index from variant name to index. - variantIndex := make(map[string]uint8) - add := func(s []string) { - for _, v := range s { - variantIndex[v] = uint8(len(variantIndex)) - } - } - add(specialized.slice()) - add(specializedExtend.s) - numSpecialized := len(variantIndex) - add(generalized.slice()) - if n := len(variantIndex); n > 255 { - log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) - } - b.writeMap("variantIndex", variantIndex) - b.writeConst("variantNumSpecialized", numSpecialized) -} - -func (b *builder) writeLanguageInfo() { -} - -// writeLikelyData writes tables that are used both for finding parent relations and for -// language matching. Each entry contains additional bits to indicate the status of the -// data to know when it cannot be used for parent relations. -func (b *builder) writeLikelyData() { - const ( - isList = 1 << iota - scriptInFrom - regionInFrom - ) - type ( // generated types - likelyScriptRegion struct { - region uint16 - script uint8 - flags uint8 - } - likelyLangScript struct { - lang uint16 - script uint8 - flags uint8 - } - likelyLangRegion struct { - lang uint16 - region uint16 - } - // likelyTag is used for getting likely tags for group regions, where - // the likely region might be a region contained in the group. 
- likelyTag struct { - lang uint16 - region uint16 - script uint8 - } - ) - var ( // generated variables - likelyRegionGroup = make([]likelyTag, len(b.groups)) - likelyLang = make([]likelyScriptRegion, len(b.lang.s)) - likelyRegion = make([]likelyLangScript, len(b.region.s)) - likelyScript = make([]likelyLangRegion, len(b.script.s)) - likelyLangList = []likelyScriptRegion{} - likelyRegionList = []likelyLangScript{} - ) - type fromTo struct { - from, to []string - } - langToOther := map[int][]fromTo{} - regionToOther := map[int][]fromTo{} - for _, m := range b.supp.LikelySubtags.LikelySubtag { - from := strings.Split(m.From, "_") - to := strings.Split(m.To, "_") - if len(to) != 3 { - log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) - } - if len(from) > 3 { - log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) - } - if from[0] != to[0] && from[0] != "und" { - log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) - } - if len(from) == 3 { - if from[2] != to[2] { - log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) - } - if from[0] != "und" { - log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) - } - } - if len(from) == 1 || from[0] != "und" { - id := 0 - if from[0] != "und" { - id = b.lang.index(from[0]) - } - langToOther[id] = append(langToOther[id], fromTo{from, to}) - } else if len(from) == 2 && len(from[1]) == 4 { - sid := b.script.index(from[1]) - likelyScript[sid].lang = uint16(b.langIndex(to[0])) - likelyScript[sid].region = uint16(b.region.index(to[2])) - } else { - r := b.region.index(from[len(from)-1]) - if id, ok := b.groups[r]; ok { - if from[0] != "und" { - log.Fatalf("region changed unexpectedly: %s -> %s", from, to) - } - likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) - likelyRegionGroup[id].script = uint8(b.script.index(to[1])) - likelyRegionGroup[id].region = uint16(b.region.index(to[2])) - } else { - regionToOther[r] = append(regionToOther[r], fromTo{from, to}) - } - } - } - b.writeType(likelyLangRegion{}) - b.writeSlice("likelyScript", likelyScript) - - for id := range b.lang.s { - list := langToOther[id] - if len(list) == 1 { - likelyLang[id].region = uint16(b.region.index(list[0].to[2])) - likelyLang[id].script = uint8(b.script.index(list[0].to[1])) - } else if len(list) > 1 { - likelyLang[id].flags = isList - likelyLang[id].region = uint16(len(likelyLangList)) - likelyLang[id].script = uint8(len(list)) - for _, x := range list { - flags := uint8(0) - if len(x.from) > 1 { - if x.from[1] == x.to[2] { - flags = regionInFrom - } else { - flags = scriptInFrom - } - } - likelyLangList = append(likelyLangList, likelyScriptRegion{ - region: uint16(b.region.index(x.to[2])), - script: uint8(b.script.index(x.to[1])), - flags: flags, - }) - } - } - } - // TODO: merge suppressScript data with this table. 
- b.writeType(likelyScriptRegion{}) - b.writeSlice("likelyLang", likelyLang) - b.writeSlice("likelyLangList", likelyLangList) - - for id := range b.region.s { - list := regionToOther[id] - if len(list) == 1 { - likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) - likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) - if len(list[0].from) > 2 { - likelyRegion[id].flags = scriptInFrom - } - } else if len(list) > 1 { - likelyRegion[id].flags = isList - likelyRegion[id].lang = uint16(len(likelyRegionList)) - likelyRegion[id].script = uint8(len(list)) - for i, x := range list { - if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { - log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) - } - x := likelyLangScript{ - lang: uint16(b.langIndex(x.to[0])), - script: uint8(b.script.index(x.to[1])), - } - if len(list[0].from) > 2 { - x.flags = scriptInFrom - } - likelyRegionList = append(likelyRegionList, x) - } - } - } - b.writeType(likelyLangScript{}) - b.writeSlice("likelyRegion", likelyRegion) - b.writeSlice("likelyRegionList", likelyRegionList) - - b.writeType(likelyTag{}) - b.writeSlice("likelyRegionGroup", likelyRegionGroup) -} - -func (b *builder) writeRegionInclusionData() { - var ( - // mm holds for each group the set of groups with a distance of 1. - mm = make(map[int][]index) - - // containment holds for each group the transitive closure of - // containment of other groups. - containment = make(map[index][]index) - ) - for _, g := range b.supp.TerritoryContainment.Group { - // Skip UN and EURO zone as they are flattening the containment - // relationship. - if g.Type == "EZ" || g.Type == "UN" { - continue - } - group := b.region.index(g.Type) - groupIdx := b.groups[group] - for _, mem := range strings.Split(g.Contains, " ") { - r := b.region.index(mem) - mm[r] = append(mm[r], groupIdx) - if g, ok := b.groups[r]; ok { - mm[group] = append(mm[group], g) - containment[groupIdx] = append(containment[groupIdx], g) - } - } - } - - regionContainment := make([]uint64, len(b.groups)) - for _, g := range b.groups { - l := containment[g] - - // Compute the transitive closure of containment. - for i := 0; i < len(l); i++ { - l = append(l, containment[l[i]]...) - } - - // Compute the bitmask. - regionContainment[g] = 1 << g - for _, v := range l { - regionContainment[g] |= 1 << v - } - } - b.writeSlice("regionContainment", regionContainment) - - regionInclusion := make([]uint8, len(b.region.s)) - bvs := make(map[uint64]index) - // Make the first bitvector positions correspond with the groups. - for r, i := range b.groups { - bv := uint64(1 << i) - for _, g := range mm[r] { - bv |= 1 << g - } - bvs[bv] = i - regionInclusion[r] = uint8(bvs[bv]) - } - for r := 1; r < len(b.region.s); r++ { - if _, ok := b.groups[r]; !ok { - bv := uint64(0) - for _, g := range mm[r] { - bv |= 1 << g - } - if bv == 0 { - // Pick the world for unspecified regions. - bv = 1 << b.groups[b.region.index("001")] - } - if _, ok := bvs[bv]; !ok { - bvs[bv] = index(len(bvs)) - } - regionInclusion[r] = uint8(bvs[bv]) - } - } - b.writeSlice("regionInclusion", regionInclusion) - regionInclusionBits := make([]uint64, len(bvs)) - for k, v := range bvs { - regionInclusionBits[v] = uint64(k) - } - // Add bit vectors for increasingly large distances until a fixed point is reached. 
- regionInclusionNext := []uint8{} - for i := 0; i < len(regionInclusionBits); i++ { - bits := regionInclusionBits[i] - next := bits - for i := uint(0); i < uint(len(b.groups)); i++ { - if bits&(1< 6 { - log.Fatalf("Too many groups: %d", i) - } - idToIndex[mv.Id] = uint8(i + 1) - // TODO: also handle '-' - for _, r := range strings.Split(mv.Value, "+") { - todo := []string{r} - for k := 0; k < len(todo); k++ { - r := todo[k] - regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) - todo = append(todo, regionHierarchy[r]...) - } - } - } - b.w.WriteVar("regionToGroups", regionToGroups) - - // maps language id to in- and out-of-group region. - paradigmLocales := [][3]uint16{} - locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") - for i := 0; i < len(locales); i += 2 { - x := [3]uint16{} - for j := 0; j < 2; j++ { - pc := strings.SplitN(locales[i+j], "-", 2) - x[0] = b.langIndex(pc[0]) - if len(pc) == 2 { - x[1+j] = uint16(b.regionIndex(pc[1])) - } - } - paradigmLocales = append(paradigmLocales, x) - } - b.w.WriteVar("paradigmLocales", paradigmLocales) - - b.w.WriteType(mutualIntelligibility{}) - b.w.WriteType(scriptIntelligibility{}) - b.w.WriteType(regionIntelligibility{}) - - matchLang := []mutualIntelligibility{} - matchScript := []scriptIntelligibility{} - matchRegion := []regionIntelligibility{} - // Convert the languageMatch entries in lists keyed by desired language. - for _, m := range lm[0].LanguageMatch { - // Different versions of CLDR use different separators. - desired := strings.Replace(m.Desired, "-", "_", -1) - supported := strings.Replace(m.Supported, "-", "_", -1) - d := strings.Split(desired, "_") - s := strings.Split(supported, "_") - if len(d) != len(s) { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - continue - } - distance, _ := strconv.ParseInt(m.Distance, 10, 8) - switch len(d) { - case 2: - if desired == supported && desired == "*_*" { - continue - } - // language-script pair. - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(d[0])), - haveLang: uint16(b.langIndex(s[0])), - wantScript: uint8(b.scriptIndex(d[1])), - haveScript: uint8(b.scriptIndex(s[1])), - distance: uint8(distance), - }) - if m.Oneway != "true" { - matchScript = append(matchScript, scriptIntelligibility{ - wantLang: uint16(b.langIndex(s[0])), - haveLang: uint16(b.langIndex(d[0])), - wantScript: uint8(b.scriptIndex(s[1])), - haveScript: uint8(b.scriptIndex(d[1])), - distance: uint8(distance), - }) - } - case 1: - if desired == supported && desired == "*" { - continue - } - if distance == 1 { - // nb == no is already handled by macro mapping. Check there - // really is only this case. - if d[0] != "no" || s[0] != "nb" { - log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) - } - continue - } - // TODO: consider dropping oneway field and just doubling the entry. - matchLang = append(matchLang, mutualIntelligibility{ - want: uint16(b.langIndex(d[0])), - have: uint16(b.langIndex(s[0])), - distance: uint8(distance), - oneway: m.Oneway == "true", - }) - case 3: - if desired == supported && desired == "*_*_*" { - continue - } - if desired != supported { - // This is now supported by CLDR, but only one case, which - // should already be covered by paradigm locales. For instance, - // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in - // testdata/CLDRLocaleMatcherTest.txt tests this. 
- if supported != "en_*_GB" { - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - continue - } - ri := regionIntelligibility{ - lang: b.langIndex(d[0]), - distance: uint8(distance), - } - if d[1] != "*" { - ri.script = uint8(b.scriptIndex(d[1])) - } - switch { - case d[2] == "*": - ri.group = 0x80 // not contained in anything - case strings.HasPrefix(d[2], "$!"): - ri.group = 0x80 - d[2] = "$" + d[2][len("$!"):] - fallthrough - case strings.HasPrefix(d[2], "$"): - ri.group |= idToIndex[d[2]] - } - matchRegion = append(matchRegion, ri) - default: - log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) - } - } - sort.SliceStable(matchLang, func(i, j int) bool { - return matchLang[i].distance < matchLang[j].distance - }) - b.w.WriteComment(` - matchLang holds pairs of langIDs of base languages that are typically - mutually intelligible. Each pair is associated with a confidence and - whether the intelligibility goes one or both ways.`) - b.w.WriteVar("matchLang", matchLang) - - b.w.WriteComment(` - matchScript holds pairs of scriptIDs where readers of one script - can typically also read the other. Each is associated with a confidence.`) - sort.SliceStable(matchScript, func(i, j int) bool { - return matchScript[i].distance < matchScript[j].distance - }) - b.w.WriteVar("matchScript", matchScript) - - sort.SliceStable(matchRegion, func(i, j int) bool { - return matchRegion[i].distance < matchRegion[j].distance - }) - b.w.WriteVar("matchRegion", matchRegion) -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go deleted file mode 100644 index 987fc169cc..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "flag" - "log" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -var outputFile = flag.String("out", "tables.go", "output file") - -func main() { - gen.Init() - gen.Repackage("gen_trieval.go", "trieval.go", "bidi") - gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") - - genTables() -} - -// bidiClass names and codes taken from class "bc" in -// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt -var bidiClass = map[string]Class{ - "AL": AL, // ArabicLetter - "AN": AN, // ArabicNumber - "B": B, // ParagraphSeparator - "BN": BN, // BoundaryNeutral - "CS": CS, // CommonSeparator - "EN": EN, // EuropeanNumber - "ES": ES, // EuropeanSeparator - "ET": ET, // EuropeanTerminator - "L": L, // LeftToRight - "NSM": NSM, // NonspacingMark - "ON": ON, // OtherNeutral - "R": R, // RightToLeft - "S": S, // SegmentSeparator - "WS": WS, // WhiteSpace - - "FSI": Control, - "PDF": Control, - "PDI": Control, - "LRE": Control, - "LRI": Control, - "LRO": Control, - "RLE": Control, - "RLI": Control, - "RLO": Control, -} - -func genTables() { - if numClass > 0x0F { - log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) - } - w := gen.NewCodeWriter() - defer w.WriteVersionedGoFile(*outputFile, "bidi") - - gen.WriteUnicodeVersion(w) - - t := triegen.NewTrie("bidi") - - // Build data about bracket mapping. These bits need to be or-ed with - // any other bits. - orMask := map[rune]uint64{} - - xorMap := map[rune]int{} - xorMasks := []rune{0} // First value is no-op. 
- - ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { - r1 := p.Rune(0) - r2 := p.Rune(1) - xor := r1 ^ r2 - if _, ok := xorMap[xor]; !ok { - xorMap[xor] = len(xorMasks) - xorMasks = append(xorMasks, xor) - } - entry := uint64(xorMap[xor]) << xorMaskShift - switch p.String(2) { - case "o": - entry |= openMask - case "c", "n": - default: - log.Fatalf("Unknown bracket class %q.", p.String(2)) - } - orMask[r1] = entry - }) - - w.WriteComment(` - xorMasks contains masks to be xor-ed with brackets to get the reverse - version.`) - w.WriteVar("xorMasks", xorMasks) - - done := map[rune]bool{} - - insert := func(r rune, c Class) { - if !done[r] { - t.Insert(r, orMask[r]|uint64(c)) - done[r] = true - } - } - - // Insert the derived BiDi properties. - ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - class, ok := bidiClass[p.String(1)] - if !ok { - log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) - } - insert(r, class) - }) - visitDefaults(insert) - - // TODO: use sparse blocks. This would reduce table size considerably - // from the looks of it. - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - w.Size += sz -} - -// dummy values to make methods in gen_common compile. The real versions -// will be generated by this file to tables.go. -var ( - xorMasks []rune -) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go deleted file mode 100644 index 02c3b505d6..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "unicode" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" - "golang.org/x/text/unicode/rangetable" -) - -// These tables are hand-extracted from: -// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt -func visitDefaults(fn func(r rune, c Class)) { - // first write default values for ranges listed above. - visitRunes(fn, AL, []rune{ - 0x0600, 0x07BF, // Arabic - 0x08A0, 0x08FF, // Arabic Extended-A - 0xFB50, 0xFDCF, // Arabic Presentation Forms - 0xFDF0, 0xFDFF, - 0xFE70, 0xFEFF, - 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols - }) - visitRunes(fn, R, []rune{ - 0x0590, 0x05FF, // Hebrew - 0x07C0, 0x089F, // Nko et al. - 0xFB1D, 0xFB4F, - 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. - 0x0001E800, 0x0001EDFF, - 0x0001EF00, 0x0001EFFF, - }) - visitRunes(fn, ET, []rune{ // European Terminator - 0x20A0, 0x20Cf, // Currency symbols - }) - rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { - fn(r, BN) // Boundary Neutral - }) - ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { - if p.String(1) == "Default_Ignorable_Code_Point" { - fn(p.Rune(0), BN) // Boundary Neutral - } - }) -} - -func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { - for i := 0; i < len(runes); i += 2 { - lo, hi := runes[i], runes[i+1] - for j := lo; j <= hi; j++ { - fn(j, c) - } - } -} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go deleted file mode 100644 index 9cb9942894..0000000000 --- a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// Class is the Unicode BiDi class. Each rune has a single class. -type Class uint - -const ( - L Class = iota // LeftToRight - R // RightToLeft - EN // EuropeanNumber - ES // EuropeanSeparator - ET // EuropeanTerminator - AN // ArabicNumber - CS // CommonSeparator - B // ParagraphSeparator - S // SegmentSeparator - WS // WhiteSpace - ON // OtherNeutral - BN // BoundaryNeutral - NSM // NonspacingMark - AL // ArabicLetter - Control // Control LRO - PDI - - numClass - - LRO // LeftToRightOverride - RLO // RightToLeftOverride - LRE // LeftToRightEmbedding - RLE // RightToLeftEmbedding - PDF // PopDirectionalFormat - LRI // LeftToRightIsolate - RLI // RightToLeftIsolate - FSI // FirstStrongIsolate - PDI // PopDirectionalIsolate - - unknownClass = ^Class(0) -) - -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - -// A trie entry has the following bits: -// 7..5 XOR mask for brackets -// 4 1: Bracket open, 0: Bracket close -// 3..0 Class type - -const ( - openMask = 0x10 - xorMaskShift = 5 -) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 30a3aa9334..0000000000 --- a/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "encoding/binary" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). 
-type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. - ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. 
- decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err := p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... -// See https://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. 
-const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. - f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. 
- for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. -func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. 
-type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - fmt.Fprintln(w, `import "sync"`) - fmt.Fprintln(w) - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap map[uint32]rune") - fmt.Fprintln(w, "var recompMapOnce sync.Once\n") - fmt.Fprintln(w, `const recompMapPacked = "" +`) - var buf [8]byte - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - binary.BigEndian.PutUint32(buf[:4], key) - binary.BigEndian.PutUint32(buf[4:], uint32(i)) - fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) - } - } - // hack so we don't have to special case the trailing plus sign - fmt.Fprintf(w, ` ""`) - fmt.Fprintln(w) - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. 
- // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... -// See https://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. 
- for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d711900d..0000000000 --- a/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. 
- -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index ae93e24719..7228d97e96 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -6,7 +6,6 @@ package rate import ( - "context" "fmt" "math" "sync" @@ -213,8 +212,19 @@ func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { return &r } +// contextContext is a temporary(?) copy of the context.Context type +// to support both Go 1.6 using golang.org/x/net/context and Go 1.7+ +// with the built-in context package. If people ever stop using Go 1.6 +// we can remove this. +type contextContext interface { + Deadline() (deadline time.Time, ok bool) + Done() <-chan struct{} + Err() error + Value(key interface{}) interface{} +} + // Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { +func (lim *Limiter) wait(ctx contextContext) (err error) { return lim.WaitN(ctx, 1) } @@ -222,7 +232,7 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // It returns an error if n exceeds the Limiter's burst size, the Context is // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. 
-func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { +func (lim *Limiter) waitN(ctx contextContext, n int) (err error) { if n > lim.burst && lim.limit != Inf { return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) } diff --git a/vendor/golang.org/x/time/rate/rate_go16.go b/vendor/golang.org/x/time/rate/rate_go16.go new file mode 100644 index 0000000000..6bab1850f8 --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package rate + +import "golang.org/x/net/context" + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.waitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + return lim.waitN(ctx, n) +} diff --git a/vendor/golang.org/x/time/rate/rate_go17.go b/vendor/golang.org/x/time/rate/rate_go17.go new file mode 100644 index 0000000000..f90d85f51e --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package rate + +import "context" + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.waitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + return lim.waitN(ctx, n) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go deleted file mode 100644 index 2713dce64a..0000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/main.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// The gcexportdata command is a diagnostic tool that displays the -// contents of gc export data files. -package main - -import ( - "flag" - "fmt" - "go/token" - "go/types" - "log" - "os" - - "golang.org/x/tools/go/gcexportdata" - "golang.org/x/tools/go/types/typeutil" -) - -var packageFlag = flag.String("package", "", "alternative package to print") - -func main() { - log.SetPrefix("gcexportdata: ") - log.SetFlags(0) - flag.Usage = func() { - fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") - } - flag.Parse() - if flag.NArg() != 1 { - flag.Usage() - os.Exit(2) - } - filename := flag.Args()[0] - - f, err := os.Open(filename) - if err != nil { - log.Fatal(err) - } - - r, err := gcexportdata.NewReader(f) - if err != nil { - log.Fatalf("%s: %s", filename, err) - } - - // Decode the package. 
- const primary = "" - imports := make(map[string]*types.Package) - fset := token.NewFileSet() - pkg, err := gcexportdata.Read(r, fset, imports, primary) - if err != nil { - log.Fatalf("%s: %s", filename, err) - } - - // Optionally select an indirectly mentioned package. - if *packageFlag != "" { - pkg = imports[*packageFlag] - if pkg == nil { - fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", - filename, *packageFlag) - for p := range imports { - if p != primary { - fmt.Fprintf(os.Stderr, "\t%s\n", p) - } - } - os.Exit(1) - } - } - - // Print all package-level declarations, including non-exported ones. - fmt.Printf("package %s\n", pkg.Name()) - for _, imp := range pkg.Imports() { - fmt.Printf("import %q\n", imp.Path()) - } - qual := func(p *types.Package) string { - if pkg == p { - return "" - } - return p.Name() - } - scope := pkg.Scope() - for _, name := range scope.Names() { - obj := scope.Lookup(name) - fmt.Printf("%s: %s\n", - fset.Position(obj.Pos()), - types.ObjectString(obj, qual)) - - // For types, print each method. - if _, ok := obj.(*types.TypeName); ok { - for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { - fmt.Printf("%s: %s\n", - fset.Position(method.Obj().Pos()), - types.SelectionString(method, qual)) - } - } - } -} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go deleted file mode 100644 index 0f652ea6fb..0000000000 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cgo - -// This file handles cgo preprocessing of files containing `import "C"`. -// -// DESIGN -// -// The approach taken is to run the cgo processor on the package's -// CgoFiles and parse the output, faking the filenames of the -// resulting ASTs so that the synthetic file containing the C types is -// called "C" (e.g. "~/go/src/net/C") and the preprocessed files -// have their original names (e.g. "~/go/src/net/cgo_unix.go"), -// not the names of the actual temporary files. -// -// The advantage of this approach is its fidelity to 'go build'. The -// downside is that the token.Position.Offset for each AST node is -// incorrect, being an offset within the temporary file. Line numbers -// should still be correct because of the //line comments. -// -// The logic of this file is mostly plundered from the 'go build' -// tool, which also invokes the cgo preprocessor. -// -// -// REJECTED ALTERNATIVE -// -// An alternative approach that we explored is to extend go/types' -// Importer mechanism to provide the identity of the importing package -// so that each time `import "C"` appears it resolves to a different -// synthetic package containing just the objects needed in that case. -// The loader would invoke cgo but parse only the cgo_types.go file -// defining the package-level objects, discarding the other files -// resulting from preprocessing. -// -// The benefit of this approach would have been that source-level -// syntax information would correspond exactly to the original cgo -// file, with no preprocessing involved, making source tools like -// godoc, guru, and eg happy. However, the approach was rejected -// due to the additional complexity it would impose on go/types. (It -// made for a beautiful demo, though.) 
-// -// cgo files, despite their *.go extension, are not legal Go source -// files per the specification since they may refer to unexported -// members of package "C" such as C.int. Also, a function such as -// C.getpwent has in effect two types, one matching its C type and one -// which additionally returns (errno C.int). The cgo preprocessor -// uses name mangling to distinguish these two functions in the -// processed code, but go/types would need to duplicate this logic in -// its handling of function calls, analogous to the treatment of map -// lookups in which y=m[k] and y,ok=m[k] are both legal. - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" -) - -// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses -// the output and returns the resulting ASTs. -// -func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { - tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmpdir) - - pkgdir := bp.Dir - if DisplayPath != nil { - pkgdir = DisplayPath(pkgdir) - } - - cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false) - if err != nil { - return nil, err - } - var files []*ast.File - for i := range cgoFiles { - rd, err := os.Open(cgoFiles[i]) - if err != nil { - return nil, err - } - display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) - f, err := parser.ParseFile(fset, display, rd, mode) - rd.Close() - if err != nil { - return nil, err - } - files = append(files, f) - } - return files, nil -} - -var cgoRe = regexp.MustCompile(`[/\\:]`) - -// Run invokes the cgo preprocessor on bp.CgoFiles and returns two -// lists of files: the resulting processed files (in temporary -// directory tmpdir) and the corresponding names of the unprocessed files. -// -// Run is adapted from (*builder).cgo in -// $GOROOT/src/cmd/go/build.go, but these features are unsupported: -// Objective C, CGOPKGPATH, CGO_FLAGS. -// -// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in -// to the cgo preprocessor. This in turn will set the // line comments -// referring to those files to use absolute paths. This is needed for -// go/packages using the legacy go list support so it is able to find -// the original files. -func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) { - cgoCPPFLAGS, _, _, _ := cflags(bp, true) - _, cgoexeCFLAGS, _, _ := cflags(bp, false) - - if len(bp.CgoPkgConfig) > 0 { - pcCFLAGS, err := pkgConfigFlags(bp) - if err != nil { - return nil, nil, err - } - cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) - } - - // Allows including _cgo_export.h from .[ch] files in the package. - cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) - - // _cgo_gotypes.go (displayed "C") contains the type definitions. - files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) - displayFiles = append(displayFiles, "C") - for _, fn := range bp.CgoFiles { - // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. 
- f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") - files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) - displayFiles = append(displayFiles, fn) - } - - var cgoflags []string - if bp.Goroot && bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_runtime_cgo=false") - } - if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_syscall=false") - } - - var cgoFiles []string = bp.CgoFiles - if useabs { - cgoFiles = make([]string, len(bp.CgoFiles)) - for i := range cgoFiles { - cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i]) - } - } - - args := stringList( - "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", - cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles, - ) - if false { - log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) - } - cmd := exec.Command(args[0], args[1:]...) - cmd.Dir = pkgdir - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) - } - - return files, displayFiles, nil -} - -// -- unmodified from 'go build' --------------------------------------- - -// Return the flags to use when invoking the C or C++ compilers, or cgo. -func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { - var defaults string - if def { - defaults = "-g -O2" - } - - cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) - cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) - cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) - ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) - return -} - -// envList returns the value of the given environment variable broken -// into fields, using the default value when the variable is empty. -func envList(key, def string) []string { - v := os.Getenv(key) - if v == "" { - v = def - } - return strings.Fields(v) -} - -// stringList's arguments should be a sequence of string or []string values. -// stringList flattens them into a single []string. -func stringList(args ...interface{}) []string { - var x []string - for _, arg := range args { - switch arg := arg.(type) { - case []string: - x = append(x, arg...) - case string: - x = append(x, arg) - default: - panic("stringList: invalid argument") - } - } - return x -} diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go deleted file mode 100644 index b5bb95a63e..0000000000 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cgo - -import ( - "errors" - "fmt" - "go/build" - "os/exec" - "strings" -) - -// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. -func pkgConfig(mode string, pkgs []string) (flags []string, err error) { - cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) - out, err := cmd.CombinedOutput() - if err != nil { - s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) - if len(out) > 0 { - s = fmt.Sprintf("%s: %s", s, out) - } - return nil, errors.New(s) - } - if len(out) > 0 { - flags = strings.Fields(string(out)) - } - return -} - -// pkgConfigFlags calls pkg-config if needed and returns the cflags -// needed to build the package. 
-func pkgConfigFlags(p *build.Package) (cflags []string, err error) { - if len(p.CgoPkgConfig) == 0 { - return nil, nil - } - return pkgConfig("--cflags", p.CgoPkgConfig) -} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go index 9f65049140..a807d0aaa2 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -127,10 +127,10 @@ func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) // --- generic export data --- // populate type map with predeclared "known" types - for index, typ := range predeclared { + for index, typ := range predeclared() { p.typIndex[typ] = index } - if len(p.typIndex) != len(predeclared) { + if len(p.typIndex) != len(predeclared()) { return nil, internalError("duplicate entries in type map?") } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go index b31eacfc0f..e3c3107825 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -126,7 +126,7 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // --- generic export data --- // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared...) + p.typList = append(p.typList, predeclared()...) // read package data pkg = p.pkg() @@ -976,50 +976,58 @@ const ( aliasTag ) -var predeclared = []types.Type{ - // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + if predecl == nil { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + 
types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + } + return predecl } type anyType struct{} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 0000000000..be671c79b7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,723 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +// +build go1.11 + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// IExportData returns the binary export data for pkg. +// If no file set is provided, position info will be missing. +func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := iexporter{ + out: bytes.NewBuffer(nil), + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex, pkg) + w.flush() + + // Assemble header. + var hdr intWriter + hdr.WriteByte('i') + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(p.out, &hdr) + io.Copy(p.out, &p.strings) + io.Copy(p.out, &p.data0) + + return p.out.Bytes(), nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { + // Build a map from packages to objects from that package. 
+ pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + pkgObjs[localpkg] = nil + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return pkgs[i].Path() < pkgs[j].Path() + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(pkg.Path()) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. 
+ w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(pkg.Path()) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Embedded()) + w.string(t.Tag(i)) // note (or tag) + } + + case 
*types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case constant.String: + w.string(constant.StringVal(v)) + case constant.Unknown: + // package contains type errors + default: + panic(internalErrorf("unexpected value %v (%T)", v, v)) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + assert(x.Kind() == constant.Float) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. 
+// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. 
+type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index 0fd22bb038..3cb7ae5b9e 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -109,7 +109,7 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] }, } - for i, pt := range predeclared { + for i, pt := range predeclared() { p.typCache[uint64(i)] = pt } @@ -142,8 +142,12 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] p.pkgIndex[pkg] = nameIndex pkgList[i] = pkg } - - localpkg := pkgList[0] + var localpkg *types.Package + for _, pkg := range pkgList { + if pkg.Path() == path { + localpkg = pkg + } + } names := make([]string, 0, len(p.pkgIndex[localpkg])) for name := range p.pkgIndex[localpkg] { @@ -330,6 +334,10 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } errorf("unexpected type %v", typ) // panics panic("unreachable") } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 0ec0fab24b..3799f8ed8b 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -14,7 +14,7 @@ but all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. -Three query operators are currently supported: "file", "pattern", and "name". +Two query operators are currently supported: "file" and "pattern". The query "file=path/to/file.go" matches the package or packages enclosing the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" @@ -25,10 +25,6 @@ the underlying build tool. In most cases this is unnecessary, but an application can use Load("pattern=" + x) as an escaping mechanism to ensure that x is not interpreted as a query operator if it contains '='. -The query "name=identifier" matches packages whose package declaration contains -the specified identifier. For example, "name=rand" would match the packages -"math/rand" and "crypto/rand", and "name=main" would match all executables. - All other query operators are reserved for future use and currently cause Load to report an error. 
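To make the two remaining query operators described in the doc.go hunk above concrete, here is a minimal sketch of a caller using golang.org/x/tools/go/packages with a "file=" query; the file path and the chosen Mode bits are illustrative only and are not part of this patch.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// "file=" resolves the package enclosing one Go source file;
	// "pattern=" passes the remainder verbatim to the build tool.
	// The path below is purely an example.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	pkgs, err := packages.Load(cfg, "file=/usr/local/go/src/fmt/print.go")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, p.GoFiles)
	}
}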
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 860c3ec156..22ff769ef2 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -18,7 +18,7 @@ import ( // Driver type driverRequest struct { - Command string `json "command"` + Command string `json:"command"` Mode LoadMode `json:"mode"` Env []string `json:"env"` BuildFlags []string `json:"build_flags"` diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 7ca5ebe459..3a0d4b0123 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -78,7 +78,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { var sizes types.Sizes var sizeserr error var sizeswg sync.WaitGroup - if cfg.Mode >= LoadTypes { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { sizes, sizeserr = getSizes(cfg) @@ -104,7 +104,7 @@ extractQueries: containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) - case "name": + case "iamashamedtousethedisabledqueryname": packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) @@ -121,20 +121,6 @@ extractQueries: } } - // TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released. - var listfunc driver - var isFallback bool - listfunc = func(cfg *Config, words ...string) (*driverResponse, error) { - response, err := golistDriverCurrent(cfg, words...) - if _, ok := err.(goTooOldError); ok { - isFallback = true - listfunc = golistDriverFallback - return listfunc(cfg, words...) - } - listfunc = golistDriverCurrent - return response, err - } - response := &responseDeduper{} var err error @@ -142,7 +128,7 @@ extractQueries: // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := listfunc(cfg, restPatterns...) + dr, err := golistDriver(cfg, restPatterns...) if err != nil { return nil, err } @@ -161,13 +147,13 @@ extractQueries: var containsCandidates []string if len(containFiles) != 0 { - if err := runContainsQueries(cfg, listfunc, isFallback, response, containFiles); err != nil { + if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil { return nil, err } } if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, listfunc, response, packagesNamed); err != nil { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { return nil, err } } @@ -180,12 +166,8 @@ extractQueries: containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - - if len(needPkgs) > 0 { - addNeededOverlayPackages(cfg, listfunc, response, needPkgs) - if err != nil { - return nil, err - } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil { + return nil, err } // Check candidate packages for containFiles. if len(containFiles) > 0 { @@ -205,6 +187,9 @@ extractQueries: } func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } dr, err := driver(cfg, pkgs...) 
if err != nil { return err @@ -212,10 +197,15 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu for _, pkg := range dr.Packages { response.addPackage(pkg) } + _, needPkgs, err := processGolistOverlay(cfg, response.dr) + if err != nil { + return err + } + addNeededOverlayPackages(cfg, driver, response, needPkgs) return nil } -func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *responseDeduper, queries []string) error { +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -225,11 +215,6 @@ func runContainsQueries(cfg *Config, driver driver, isFallback bool, response *r if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } - if isFallback { - pattern = "." - cfg.Dir = fdir - } - dirResponse, err := driver(cfg, pattern) if err != nil { return err @@ -559,10 +544,10 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriverCurrent uses the "go list" command to expand the -// pattern words and return metadata for the specified packages. -// dir may be "" and env may be nil, as per os/exec.Command. -func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) { +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. +func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -605,7 +590,7 @@ func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) if old, found := seen[p.ImportPath]; found { if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("go list repeated package %v with different values", p.ImportPath) + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) } // skip the duplicate continue @@ -620,16 +605,25 @@ func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) OtherFiles: absJoin(p.Dir, otherFiles(p)...), } - // Workaround for https://golang.org/issue/28749. - // TODO(adonovan): delete before go1.12 release. - out := pkg.CompiledGoFiles[:0] - for _, f := range pkg.CompiledGoFiles { - if strings.HasSuffix(f, ".s") { - continue + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true } - out = append(out, f) + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out } - pkg.CompiledGoFiles = out // Extract the PkgPath from the package's ID. 
if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { @@ -711,14 +705,16 @@ func absJoin(dir string, fileses ...[]string) (res []string) { } func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", "-compiled", + "list", "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), - fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. - fmt.Sprintf("-find=%t", cfg.Mode < LoadImports && !cfg.Tests), + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), } fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") @@ -728,9 +724,6 @@ func golistargs(cfg *Config, words []string) []string { // invokeGo returns the stdout of a go command invocation. func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { - if debug { - defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(cfg, args...)) }(time.Now()) - } stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) cmd := exec.CommandContext(cfg.Context, "go", args...) @@ -744,11 +737,21 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { cmd.Dir = cfg.Dir cmd.Stdout = stdout cmd.Stderr = stderr + if debug { + defer func(start time.Time) { + log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr) + }(time.Now()) + } + if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + exitErr, ok := err.(*exec.ExitError) if !ok { // Catastrophic error: - // - executable not found // - context cancellation return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) } @@ -758,6 +761,22 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} } + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + // Export mode entails a build. // If that build fails, errors appear on stderr // (despite the -e flag) and the Export field is blank. @@ -777,12 +796,12 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { // be useful for debugging. 
Print them if $GOPACKAGESPRINTGOLISTERRORS // is set. if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cfg, args...), stderr) + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) } // debugging if false { - fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cfg, args...), stdout) + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout) } return stdout, nil @@ -797,13 +816,17 @@ func containsGoFile(s []string) bool { return false } -func cmdDebugStr(cfg *Config, args ...string) string { +func cmdDebugStr(cmd *exec.Cmd, args ...string) string { env := make(map[string]string) - for _, kv := range cfg.Env { + for _, kv := range cmd.Env { split := strings.Split(kv, "=") k, v := split[0], split[1] env[k] = v } + var quotedArgs []string + for _, arg := range args { + quotedArgs = append(quotedArgs, strconv.Quote(arg)) + } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) } diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback.go b/vendor/golang.org/x/tools/go/packages/golist_fallback.go deleted file mode 100644 index 141fa19ac1..0000000000 --- a/vendor/golang.org/x/tools/go/packages/golist_fallback.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "encoding/json" - "fmt" - "go/build" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/go/internal/cgo" -) - -// TODO(matloob): Delete this file once Go 1.12 is released. - -// This file provides backwards compatibility support for -// loading for versions of Go earlier than 1.11. This support is meant to -// assist with migration to the Package API until there's -// widespread adoption of these newer Go versions. -// This support will be removed once Go 1.12 is released -// in Q1 2019. - -func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) { - // Turn absolute paths into GOROOT and GOPATH-relative paths to provide to go list. - // This will have surprising behavior if GOROOT or GOPATH contain multiple packages with the same - // path and a user provides an absolute path to a directory that's shadowed by an earlier - // directory in GOROOT or GOPATH with the same package path. - words = cleanAbsPaths(cfg, words) - - original, deps, err := getDeps(cfg, words...) 
- if err != nil { - return nil, err - } - - var tmpdir string // used for generated cgo files - var needsTestVariant []struct { - pkg, xtestPkg *Package - } - - var response driverResponse - allPkgs := make(map[string]bool) - addPackage := func(p *jsonPackage, isRoot bool) { - id := p.ImportPath - - if allPkgs[id] { - return - } - allPkgs[id] = true - - pkgpath := id - - if pkgpath == "unsafe" { - p.GoFiles = nil // ignore fake unsafe.go file - } - - importMap := func(importlist []string) map[string]*Package { - importMap := make(map[string]*Package) - for _, id := range importlist { - - if id == "C" { - for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} { - if pkgpath != path && importMap[path] == nil { - importMap[path] = &Package{ID: path} - } - } - continue - } - importMap[vendorlessPath(id)] = &Package{ID: id} - } - return importMap - } - compiledGoFiles := absJoin(p.Dir, p.GoFiles) - // Use a function to simplify control flow. It's just a bunch of gotos. - var cgoErrors []error - var outdir string - getOutdir := func() (string, error) { - if outdir != "" { - return outdir, nil - } - if tmpdir == "" { - if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil { - return "", err - } - } - outdir = filepath.Join(tmpdir, strings.Replace(p.ImportPath, "/", "_", -1)) - if err := os.MkdirAll(outdir, 0755); err != nil { - outdir = "" - return "", err - } - return outdir, nil - } - processCgo := func() bool { - // Suppress any cgo errors. Any relevant errors will show up in typechecking. - // TODO(matloob): Skip running cgo if Mode < LoadTypes. - outdir, err := getOutdir() - if err != nil { - cgoErrors = append(cgoErrors, err) - return false - } - files, _, err := runCgo(p.Dir, outdir, cfg.Env) - if err != nil { - cgoErrors = append(cgoErrors, err) - return false - } - compiledGoFiles = append(compiledGoFiles, files...) - return true - } - if len(p.CgoFiles) == 0 || !processCgo() { - compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker. - } - if isRoot { - response.Roots = append(response.Roots, id) - } - pkg := &Package{ - ID: id, - Name: p.Name, - GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), - CompiledGoFiles: compiledGoFiles, - OtherFiles: absJoin(p.Dir, otherFiles(p)...), - PkgPath: pkgpath, - Imports: importMap(p.Imports), - // TODO(matloob): set errors on the Package to cgoErrors - } - if p.Error != nil { - pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: p.Error.Err, - }) - } - response.Packages = append(response.Packages, pkg) - if cfg.Tests && isRoot { - testID := fmt.Sprintf("%s [%s.test]", id, id) - if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 { - response.Roots = append(response.Roots, testID) - testPkg := &Package{ - ID: testID, - Name: p.Name, - GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles), - CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...), - OtherFiles: absJoin(p.Dir, otherFiles(p)...), - PkgPath: pkgpath, - Imports: importMap(append(p.Imports, p.TestImports...)), - // TODO(matloob): set errors on the Package to cgoErrors - } - response.Packages = append(response.Packages, testPkg) - var xtestPkg *Package - if len(p.XTestGoFiles) > 0 { - xtestID := fmt.Sprintf("%s_test [%s.test]", id, id) - response.Roots = append(response.Roots, xtestID) - // Generate test variants for all packages q where a path exists - // such that xtestPkg -> ... -> q -> ... 
-> p (where p is the package under test) - // and rewrite all import map entries of p to point to testPkg (the test variant of - // p), and of each q to point to the test variant of that q. - xtestPkg = &Package{ - ID: xtestID, - Name: p.Name + "_test", - GoFiles: absJoin(p.Dir, p.XTestGoFiles), - CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles), - PkgPath: pkgpath + "_test", - Imports: importMap(p.XTestImports), - } - // Add to list of packages we need to rewrite imports for to refer to test variants. - // We may need to create a test variant of a package that hasn't been loaded yet, so - // the test variants need to be created later. - needsTestVariant = append(needsTestVariant, struct{ pkg, xtestPkg *Package }{pkg, xtestPkg}) - response.Packages = append(response.Packages, xtestPkg) - } - // testmain package - testmainID := id + ".test" - response.Roots = append(response.Roots, testmainID) - imports := map[string]*Package{} - imports[testPkg.PkgPath] = &Package{ID: testPkg.ID} - if xtestPkg != nil { - imports[xtestPkg.PkgPath] = &Package{ID: xtestPkg.ID} - } - testmainPkg := &Package{ - ID: testmainID, - Name: "main", - PkgPath: testmainID, - Imports: imports, - } - response.Packages = append(response.Packages, testmainPkg) - outdir, err := getOutdir() - if err != nil { - testmainPkg.Errors = append(testmainPkg.Errors, Error{ - Pos: "-", - Msg: fmt.Sprintf("failed to generate testmain: %v", err), - Kind: ListError, - }) - return - } - // Don't use a .go extension on the file, so that the tests think the file is inside GOCACHE. - // This allows the same test to test the pre- and post-Go 1.11 go list logic because the Go 1.11 - // go list generates test mains in the cache, and the test code knows not to rely on paths in the - // cache to stay stable. - testmain := filepath.Join(outdir, "testmain-go") - extraimports, extradeps, err := generateTestmain(testmain, testPkg, xtestPkg) - if err != nil { - testmainPkg.Errors = append(testmainPkg.Errors, Error{ - Pos: "-", - Msg: fmt.Sprintf("failed to generate testmain: %v", err), - Kind: ListError, - }) - } - deps = append(deps, extradeps...) - for _, imp := range extraimports { // testing, testing/internal/testdeps, and maybe os - imports[imp] = &Package{ID: imp} - } - testmainPkg.GoFiles = []string{testmain} - testmainPkg.CompiledGoFiles = []string{testmain} - } - } - } - - for _, pkg := range original { - addPackage(pkg, true) - } - if cfg.Mode < LoadImports || len(deps) == 0 { - return &response, nil - } - - buf, err := invokeGo(cfg, golistArgsFallback(cfg, deps)...) - if err != nil { - return nil, err - } - - // Decode the JSON and convert it to Package form. 
- for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, fmt.Errorf("JSON decoding failed: %v", err) - } - - addPackage(p, false) - } - - for _, v := range needsTestVariant { - createTestVariants(&response, v.pkg, v.xtestPkg) - } - - return &response, nil -} - -func createTestVariants(response *driverResponse, pkgUnderTest, xtestPkg *Package) { - allPkgs := make(map[string]*Package) - for _, pkg := range response.Packages { - allPkgs[pkg.ID] = pkg - } - needsTestVariant := make(map[string]bool) - needsTestVariant[pkgUnderTest.ID] = true - var needsVariantRec func(p *Package) bool - needsVariantRec = func(p *Package) bool { - if needsTestVariant[p.ID] { - return true - } - for _, imp := range p.Imports { - if needsVariantRec(allPkgs[imp.ID]) { - // Don't break because we want to make sure all dependencies - // have been processed, and all required test variants of our dependencies - // exist. - needsTestVariant[p.ID] = true - } - } - if !needsTestVariant[p.ID] { - return false - } - // Create a clone of the package. It will share the same strings and lists of source files, - // but that's okay. It's only necessary for the Imports map to have a separate identity. - testVariant := *p - testVariant.ID = fmt.Sprintf("%s [%s.test]", p.ID, pkgUnderTest.ID) - testVariant.Imports = make(map[string]*Package) - for imp, pkg := range p.Imports { - testVariant.Imports[imp] = pkg - if needsTestVariant[pkg.ID] { - testVariant.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} - } - } - response.Packages = append(response.Packages, &testVariant) - return needsTestVariant[p.ID] - } - // finally, update the xtest package's imports - for imp, pkg := range xtestPkg.Imports { - if allPkgs[pkg.ID] == nil { - fmt.Printf("for %s: package %s doesn't exist\n", xtestPkg.ID, pkg.ID) - } - if needsVariantRec(allPkgs[pkg.ID]) { - xtestPkg.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)} - } - } -} - -// cleanAbsPaths replaces all absolute paths with GOPATH- and GOROOT-relative -// paths. If an absolute path is not GOPATH- or GOROOT- relative, it is left as an -// absolute path so an error can be returned later. -func cleanAbsPaths(cfg *Config, words []string) []string { - var searchpaths []string - var cleaned = make([]string, len(words)) - for i := range cleaned { - cleaned[i] = words[i] - // Ignore relative directory paths (they must already be goroot-relative) and Go source files - // (absolute source files are already allowed for ad-hoc packages). - // TODO(matloob): Can there be non-.go files in ad-hoc packages. - if !filepath.IsAbs(cleaned[i]) || strings.HasSuffix(cleaned[i], ".go") { - continue - } - // otherwise, it's an absolute path. Search GOPATH and GOROOT to find it. 
- if searchpaths == nil { - cmd := exec.Command("go", "env", "GOPATH", "GOROOT") - cmd.Env = cfg.Env - out, err := cmd.Output() - if err != nil { - searchpaths = []string{} - continue // suppress the error, it will show up again when running go list - } - lines := strings.Split(string(out), "\n") - if len(lines) != 3 || lines[0] == "" || lines[1] == "" || lines[2] != "" { - continue // suppress error - } - // first line is GOPATH - for _, path := range filepath.SplitList(lines[0]) { - searchpaths = append(searchpaths, filepath.Join(path, "src")) - } - // second line is GOROOT - searchpaths = append(searchpaths, filepath.Join(lines[1], "src")) - } - for _, sp := range searchpaths { - if strings.HasPrefix(cleaned[i], sp) { - cleaned[i] = strings.TrimPrefix(cleaned[i], sp) - cleaned[i] = strings.TrimLeft(cleaned[i], string(filepath.Separator)) - } - } - } - return cleaned -} - -// vendorlessPath returns the devendorized version of the import path ipath. -// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". -// Copied from golang.org/x/tools/imports/fix.go. -func vendorlessPath(ipath string) string { - // Devendorize for use in import statement. - if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { - return ipath[i+len("/vendor/"):] - } - if strings.HasPrefix(ipath, "vendor/") { - return ipath[len("vendor/"):] - } - return ipath -} - -// getDeps runs an initial go list to determine all the dependency packages. -func getDeps(cfg *Config, words ...string) (initial []*jsonPackage, deps []string, err error) { - buf, err := invokeGo(cfg, golistArgsFallback(cfg, words)...) - if err != nil { - return nil, nil, err - } - - depsSet := make(map[string]bool) - var testImports []string - - // Extract deps from the JSON. - for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) - } - - initial = append(initial, p) - for _, dep := range p.Deps { - depsSet[dep] = true - } - if cfg.Tests { - // collect the additional imports of the test packages. - pkgTestImports := append(p.TestImports, p.XTestImports...) - for _, imp := range pkgTestImports { - if depsSet[imp] { - continue - } - depsSet[imp] = true - testImports = append(testImports, imp) - } - } - } - // Get the deps of the packages imported by tests. - if len(testImports) > 0 { - buf, err = invokeGo(cfg, golistArgsFallback(cfg, testImports)...) - if err != nil { - return nil, nil, err - } - // Extract deps from the JSON. - for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, nil, fmt.Errorf("JSON decoding failed: %v", err) - } - for _, dep := range p.Deps { - depsSet[dep] = true - } - } - } - - for _, orig := range initial { - delete(depsSet, orig.ImportPath) - } - - deps = make([]string, 0, len(depsSet)) - for dep := range depsSet { - deps = append(deps, dep) - } - sort.Strings(deps) // ensure output is deterministic - return initial, deps, nil -} - -func golistArgsFallback(cfg *Config, words []string) []string { - fullargs := []string{"list", "-e", "-json"} - fullargs = append(fullargs, cfg.BuildFlags...) - fullargs = append(fullargs, "--") - fullargs = append(fullargs, words...) - return fullargs -} - -func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) { - // Use go/build to open cgo files and determine the cgo flags, etc, from them. 
- // This is tricky so it's best to avoid reimplementing as much as we can, and - // we plan to delete this support once Go 1.12 is released anyways. - // TODO(matloob): This isn't completely correct because we're using the Default - // context. Perhaps we should more accurately fill in the context. - bp, err := build.ImportDir(pkgdir, build.ImportMode(0)) - if err != nil { - return nil, nil, err - } - for _, ev := range env { - if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS"); v != ev { - bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...) - } else if v := strings.TrimPrefix(ev, "CGO_CFLAGS"); v != ev { - bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...) - } else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS"); v != ev { - bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...) - } else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS"); v != ev { - bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...) - } - } - return cgo.Run(bp, pkgdir, tmpdir, true) -} diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go b/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go deleted file mode 100644 index 128e00e25a..0000000000 --- a/vendor/golang.org/x/tools/go/packages/golist_fallback_testmain.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is largely based on the Go 1.10-era cmd/go/internal/test/test.go -// testmain generation code. - -package packages - -import ( - "errors" - "fmt" - "go/ast" - "go/doc" - "go/parser" - "go/token" - "os" - "sort" - "strings" - "text/template" - "unicode" - "unicode/utf8" -) - -// TODO(matloob): Delete this file once Go 1.12 is released. - -// This file complements golist_fallback.go by providing -// support for generating testmains. - -func generateTestmain(out string, testPkg, xtestPkg *Package) (extraimports, extradeps []string, err error) { - testFuncs, err := loadTestFuncs(testPkg, xtestPkg) - if err != nil { - return nil, nil, err - } - extraimports = []string{"testing", "testing/internal/testdeps"} - if testFuncs.TestMain == nil { - extraimports = append(extraimports, "os") - } - // Transitive dependencies of ("testing", "testing/internal/testdeps"). - // os is part of the transitive closure so it and its transitive dependencies are - // included regardless of whether it's imported in the template below. - extradeps = []string{ - "errors", - "internal/cpu", - "unsafe", - "internal/bytealg", - "internal/race", - "runtime/internal/atomic", - "runtime/internal/sys", - "runtime", - "sync/atomic", - "sync", - "io", - "unicode", - "unicode/utf8", - "bytes", - "math", - "syscall", - "time", - "internal/poll", - "internal/syscall/unix", - "internal/testlog", - "os", - "math/bits", - "strconv", - "reflect", - "fmt", - "sort", - "strings", - "flag", - "runtime/debug", - "context", - "runtime/trace", - "testing", - "bufio", - "regexp/syntax", - "regexp", - "compress/flate", - "encoding/binary", - "hash", - "hash/crc32", - "compress/gzip", - "path/filepath", - "io/ioutil", - "text/tabwriter", - "runtime/pprof", - "testing/internal/testdeps", - } - return extraimports, extradeps, writeTestmain(out, testFuncs) -} - -// The following is adapted from the cmd/go testmain generation code. - -// isTestFunc tells whether fn has the type of a testing function. arg -// specifies the parameter type we look for: B, M or T. 
-func isTestFunc(fn *ast.FuncDecl, arg string) bool { - if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || - fn.Type.Params.List == nil || - len(fn.Type.Params.List) != 1 || - len(fn.Type.Params.List[0].Names) > 1 { - return false - } - ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) - if !ok { - return false - } - // We can't easily check that the type is *testing.M - // because we don't know how testing has been imported, - // but at least check that it's *M or *something.M. - // Same applies for B and T. - if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg { - return true - } - if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg { - return true - } - return false -} - -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -// loadTestFuncs returns the testFuncs describing the tests that will be run. -func loadTestFuncs(ptest, pxtest *Package) (*testFuncs, error) { - t := &testFuncs{ - TestPackage: ptest, - XTestPackage: pxtest, - } - for _, file := range ptest.GoFiles { - if !strings.HasSuffix(file, "_test.go") { - continue - } - if err := t.load(file, "_test", &t.ImportTest, &t.NeedTest); err != nil { - return nil, err - } - } - if pxtest != nil { - for _, file := range pxtest.GoFiles { - if err := t.load(file, "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil { - return nil, err - } - } - } - return t, nil -} - -// writeTestmain writes the _testmain.go file for t to the file named out. -func writeTestmain(out string, t *testFuncs) error { - f, err := os.Create(out) - if err != nil { - return err - } - defer f.Close() - - if err := testmainTmpl.Execute(f, t); err != nil { - return err - } - - return nil -} - -type testFuncs struct { - Tests []testFunc - Benchmarks []testFunc - Examples []testFunc - TestMain *testFunc - TestPackage *Package - XTestPackage *Package - ImportTest bool - NeedTest bool - ImportXtest bool - NeedXtest bool -} - -// Tested returns the name of the package being tested. -func (t *testFuncs) Tested() string { - return t.TestPackage.Name -} - -type testFunc struct { - Package string // imported package name (_test or _xtest) - Name string // function name - Output string // output, for examples - Unordered bool // output is allowed to be unordered. 
-} - -func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { - var fset = token.NewFileSet() - - f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments) - if err != nil { - return errors.New("failed to parse test file " + filename) - } - for _, d := range f.Decls { - n, ok := d.(*ast.FuncDecl) - if !ok { - continue - } - if n.Recv != nil { - continue - } - name := n.Name.String() - switch { - case name == "TestMain": - if isTestFunc(n, "T") { - t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - continue - } - err := checkTestFunc(fset, n, "M") - if err != nil { - return err - } - if t.TestMain != nil { - return errors.New("multiple definitions of TestMain") - } - t.TestMain = &testFunc{pkg, name, "", false} - *doImport, *seen = true, true - case isTest(name, "Test"): - err := checkTestFunc(fset, n, "T") - if err != nil { - return err - } - t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - case isTest(name, "Benchmark"): - err := checkTestFunc(fset, n, "B") - if err != nil { - return err - } - t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false}) - *doImport, *seen = true, true - } - } - ex := doc.Examples(f) - sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order }) - for _, e := range ex { - *doImport = true // import test file whether executed or not - if e.Output == "" && !e.EmptyOutput { - // Don't run examples with no output. - continue - } - t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered}) - *seen = true - } - return nil -} - -func checkTestFunc(fset *token.FileSet, fn *ast.FuncDecl, arg string) error { - if !isTestFunc(fn, arg) { - name := fn.Name.String() - pos := fset.Position(fn.Pos()) - return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg) - } - return nil -} - -var testmainTmpl = template.Must(template.New("main").Parse(` -package main - -import ( -{{if not .TestMain}} - "os" -{{end}} - "testing" - "testing/internal/testdeps" - -{{if .ImportTest}} - {{if .NeedTest}}_test{{else}}_{{end}} {{.TestPackage.PkgPath | printf "%q"}} -{{end}} -{{if .ImportXtest}} - {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.XTestPackage.PkgPath | printf "%q"}} -{{end}} -) - -var tests = []testing.InternalTest{ -{{range .Tests}} - {"{{.Name}}", {{.Package}}.{{.Name}}}, -{{end}} -} - -var benchmarks = []testing.InternalBenchmark{ -{{range .Benchmarks}} - {"{{.Name}}", {{.Package}}.{{.Name}}}, -{{end}} -} - -var examples = []testing.InternalExample{ -{{range .Examples}} - {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, -{{end}} -} - -func init() { - testdeps.ImportPath = {{.TestPackage.PkgPath | printf "%q"}} -} - -func main() { - m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples) -{{with .TestMain}} - {{.Package}}.{{.Name}}(m) -{{else}} - os.Exit(m.Run()) -{{end}} -} - -`)) diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 71ffcd9d55..33a0a28f2c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -46,7 +46,9 @@ outer: fileExists = true } } - if dirContains { + // The overlay could have included an entirely new package. 
+ isNewPackage := extractPackage(pkg, path, contents) + if dirContains || isNewPackage { if !fileExists { pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles? pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path) @@ -102,3 +104,35 @@ func extractImports(filename string, contents []byte) ([]string, error) { } return res, nil } + +// extractPackage attempts to extract a package defined in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func extractPackage(pkg *Package, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return false + } + // TODO(rstambler): This doesn't work for main packages. + if filepath.Base(pkg.PkgPath) != f.Name.Name { + return false + } + pkg.Name = f.Name.Name + pkg.Errors = nil + return true +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index f4aac56e97..eedd43bb6b 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -19,6 +19,7 @@ import ( "log" "os" "path/filepath" + "strings" "sync" "golang.org/x/tools/go/gcexportdata" @@ -29,32 +30,78 @@ import ( // but may be slower. Load may return more information than requested. type LoadMode int +const ( + // The following constants are used to specify which fields of the Package + // should be filled when loading is done. As a special case to provide + // backwards compatibility, a LoadMode of 0 is equivalent to LoadFiles. + // For all other LoadModes, the bits below specify which fields will be filled + // in the result packages. + // WARNING: This part of the go/packages API is EXPERIMENTAL. It might + // be changed or removed up until April 15 2019. After that date it will + // be frozen. + // TODO(matloob): Remove this comment on April 15. + + // ID and Errors (if present) will always be filled. + + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports + // is not set NeedDeps has no effect. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes +) + const ( // LoadFiles finds the packages and computes their source file lists. - // Package fields: ID, Name, Errors, GoFiles, and OtherFiles. - LoadFiles LoadMode = iota + // Package fields: ID, Name, Errors, GoFiles, CompiledGoFiles, and OtherFiles. 
+ LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles // LoadImports adds import information for each package // and its dependencies. // Package fields added: Imports. - LoadImports + LoadImports = LoadFiles | NeedImports | NeedDeps // LoadTypes adds type information for package-level // declarations in the packages matching the patterns. - // Package fields added: Types, Fset, and IllTyped. + // Package fields added: Types, TypesSizes, Fset, and IllTyped. // This mode uses type information provided by the build system when // possible, and may fill in the ExportFile field. - LoadTypes + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes // LoadSyntax adds typed syntax trees for the packages matching the patterns. // Package fields added: Syntax, and TypesInfo, for direct pattern matches only. - LoadSyntax + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo // LoadAllSyntax adds typed syntax trees for the packages matching the patterns // and all dependencies. // Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo, // for all packages in the import graph. - LoadAllSyntax + LoadAllSyntax = LoadSyntax ) // A Config specifies details about how packages should be loaded. @@ -90,7 +137,7 @@ type Config struct { BuildFlags []string // Fset provides source position information for syntax trees and types. - // If Fset is nil, the loader will create a new FileSet. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet // ParseFile is called to read and parse each file @@ -371,15 +418,34 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes - exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // TODO(matloob): Add an implied mode here and use that instead of mode. + // Implied mode would contain all the fields we need the data for so we can + // get the actually requested fields. We'll zero them out before returning + // packages to the user. This will make it easier for us to get the conditions + // where we need certain modes right. +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} } func newLoader(cfg *Config) *loader { - ld := &loader{} + ld := &loader{ + parseCache: map[string]*parseValue{}, + } if cfg != nil { ld.Config = *cfg } + if ld.Config.Mode == 0 { + ld.Config.Mode = LoadFiles // Preserve zero behavior of Mode for backwards compatibility. + } if ld.Config.Env == nil { ld.Config.Env = os.Environ() } @@ -392,7 +458,7 @@ func newLoader(cfg *Config) *loader { } } - if ld.Mode >= LoadTypes { + if ld.Mode&NeedTypes != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -401,12 +467,8 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. 
if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { - var isrc interface{} - if src != nil { - isrc = src - } const mode = parser.AllErrors | parser.ParseComments - return parser.ParseFile(fset, filename, isrc, mode) + return parser.ParseFile(fset, filename, src, mode) } } } @@ -429,11 +491,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { rootIndex = i } lpkg := &loaderPackage{ - Package: pkg, - needtypes: ld.Mode >= LoadAllSyntax || - ld.Mode >= LoadTypes && rootIndex >= 0, - needsrc: ld.Mode >= LoadAllSyntax || - ld.Mode >= LoadSyntax && rootIndex >= 0 || + Package: pkg, + needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0, + needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 || len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files pkg.ExportFile == "" && pkg.PkgPath != "unsafe", } @@ -506,14 +566,17 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if lpkg.needsrc { srcPkgs = append(srcPkgs, lpkg) } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } stack = stack[:len(stack)-1] // pop lpkg.color = black return lpkg.needsrc } - if ld.Mode < LoadImports { - //we do this to drop the stub import packages that we are not even going to try to resolve + if ld.Mode&(NeedImports|NeedDeps) == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. for _, lpkg := range initial { lpkg.Imports = nil } @@ -523,17 +586,19 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { visit(lpkg) } } - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true + if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right? + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } } } // Load type data if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode >= LoadTypes { + if ld.Mode&NeedTypes != 0 { var wg sync.WaitGroup for _, lpkg := range initial { wg.Add(1) @@ -546,16 +611,61 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { } result := make([]*Package, len(initial)) + importPlaceholders := make(map[string]*Package) for i, lpkg := range initial { result[i] = lpkg.Package } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. 
+ if ld.Mode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.Mode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.Mode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.Mode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.Mode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.Mode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.Mode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.Mode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.Mode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.Mode&NeedDeps == 0 { + for j, pkg := range ld.pkgs[i].Imports { + ph, ok := importPlaceholders[pkg.ID] + if !ok { + ph = &Package{ID: pkg.ID} + importPlaceholders[pkg.ID] = ph + } + ld.pkgs[i].Imports[j] = ph + } + } + } return result, nil } // loadRecursive loads the specified package and its dependencies, // recursively, in parallel, in topological order. // It is atomic and idempotent. -// Precondition: ld.Mode >= LoadTypes. +// Precondition: ld.Mode&NeedTypes. func (ld *loader) loadRecursive(lpkg *loaderPackage) { lpkg.loadOnce.Do(func() { // Load the direct dependencies, in parallel. @@ -707,7 +817,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // Type-check bodies of functions only in non-initial packages. // Example: for import graph A->B->C and initial packages {A,C}, // we can ignore function bodies in B. - IgnoreFuncBodies: ld.Mode < LoadAllSyntax && !lpkg.initial, + IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial, Error: appendError, Sizes: ld.sizes, @@ -760,6 +870,42 @@ func (f importerFunc) Import(path string) (*types.Package, error) { return f(pat // the number of parallel I/O calls per process. var ioLimit = make(chan bool, 20) +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + // parseFiles reads and parses the Go source files and returns the ASTs // of the ones that could be at least partially parsed, along with a // list of I/O and parse errors encountered. @@ -780,24 +926,7 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { } wg.Add(1) go func(i int, filename string) { - ioLimit <- true // wait - // ParseFile may return both an AST and an error. - var src []byte - for f, contents := range ld.Config.Overlay { - if sameFile(f, filename) { - src = contents - } - } - var err error - if src == nil { - src, err = ioutil.ReadFile(filename) - } - if err != nil { - parsed[i], errors[i] = nil, err - } else { - parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src) - } - <-ioLimit // signal + parsed[i], errors[i] = ld.parseFile(filename) wg.Done() }(i, file) } @@ -838,7 +967,7 @@ func sameFile(x, y string) bool { // overlay case implies x==y.) 
return true } - if filepath.Base(x) == filepath.Base(y) { // (optimisation) + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) if xi, err := os.Stat(x); err == nil { if yi, err := os.Stat(y); err == nil { return os.SameFile(xi, yi) @@ -951,5 +1080,5 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error } func usesExportData(cfg *Config) bool { - return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0 } diff --git a/vendor/golang.org/x/tools/imports/fix.go b/vendor/golang.org/x/tools/imports/fix.go index f18c413514..777d28ccdb 100644 --- a/vendor/golang.org/x/tools/imports/fix.go +++ b/vendor/golang.org/x/tools/imports/fix.go @@ -23,6 +23,8 @@ import ( "strings" "sync" "time" + "unicode" + "unicode/utf8" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" @@ -289,7 +291,7 @@ func (p *pass) importIdentifier(imp *importInfo) string { if known != nil && known.name != "" { return known.name } - return importPathToNameBasic(imp.importPath, p.srcDir) + return importPathToAssumedName(imp.importPath) } // load reads in everything necessary to run a pass, and reports whether the @@ -389,7 +391,7 @@ func (p *pass) fix() bool { } path := strings.Trim(imp.Path.Value, `""`) ident := p.importIdentifier(&importInfo{importPath: path}) - if ident != importPathToNameBasic(path, p.srcDir) { + if ident != importPathToAssumedName(path) { imp.Name = &ast.Ident{Name: ident, NamePos: imp.Pos()} } } @@ -648,7 +650,7 @@ func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir strin if _, ok := names[path]; ok { continue } - names[path] = importPathToNameBasic(path, srcDir) + names[path] = importPathToAssumedName(path) } return names, nil @@ -657,7 +659,7 @@ func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir strin func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) { var loadQueries []string for pkgName := range refs { - loadQueries = append(loadQueries, "name="+pkgName) + loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) } sort.Strings(loadQueries) cfg := r.env.newPackagesConfig(packages.LoadFiles) @@ -741,18 +743,36 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return firstErr } -// importPathToNameBasic assumes the package name is the base of import path, -// except that if the path ends in foo/vN, it assumes the package name is foo. -func importPathToNameBasic(importPath, srcDir string) (packageName string) { +// notIdentifier reports whether ch is an invalid identifier character. +func notIdentifier(ch rune) bool { + return !('a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || + '0' <= ch && ch <= '9' || + ch == '_' || + ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) +} + +// importPathToAssumedName returns the assumed package name of an import path. +// It does this using only string parsing of the import path. +// It picks the last element of the path that does not look like a major +// version, and then picks the valid identifier off the start of that element. +// It is used to determine if a local rename should be added to an import for +// clarity. +// This function could be moved to a standard package and exported if we want +// for use in other tools. 
+func importPathToAssumedName(importPath string) string { base := path.Base(importPath) if strings.HasPrefix(base, "v") { if _, err := strconv.Atoi(base[1:]); err == nil { dir := path.Dir(importPath) if dir != "." { - return path.Base(dir) + base = path.Base(dir) } } } + base = strings.TrimPrefix(base, "go-") + if i := strings.IndexFunc(base, notIdentifier); i >= 0 { + base = base[:i] + } return base } @@ -1023,7 +1043,7 @@ func findImport(ctx context.Context, env *fixEnv, dirScan []*pkg, pkgName string // Find candidate packages, looking only at their directory names first. var candidates []pkgDistance for _, pkg := range dirScan { - if pkgIsCandidate(filename, pkgName, pkg) { + if pkg.dir != pkgDir && pkgIsCandidate(filename, pkgName, pkg) { candidates = append(candidates, pkgDistance{ pkg: pkg, distance: distance(pkgDir, pkg.dir), diff --git a/vendor/golang.org/x/tools/imports/mkindex.go b/vendor/golang.org/x/tools/imports/mkindex.go deleted file mode 100644 index 755e2394f2..0000000000 --- a/vendor/golang.org/x/tools/imports/mkindex.go +++ /dev/null @@ -1,173 +0,0 @@ -// +build ignore - -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Command mkindex creates the file "pkgindex.go" containing an index of the Go -// standard library. The file is intended to be built as part of the imports -// package, so that the package may be used in environments where a GOROOT is -// not available (such as App Engine). -package main - -import ( - "bytes" - "fmt" - "go/ast" - "go/build" - "go/format" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "strings" -) - -var ( - pkgIndex = make(map[string][]pkg) - exports = make(map[string]map[string]bool) -) - -func main() { - // Don't use GOPATH. - ctx := build.Default - ctx.GOPATH = "" - - // Populate pkgIndex global from GOROOT. - for _, path := range ctx.SrcDirs() { - f, err := os.Open(path) - if err != nil { - log.Print(err) - continue - } - children, err := f.Readdir(-1) - f.Close() - if err != nil { - log.Print(err) - continue - } - for _, child := range children { - if child.IsDir() { - loadPkg(path, child.Name()) - } - } - } - // Populate exports global. - for _, ps := range pkgIndex { - for _, p := range ps { - e := loadExports(p.dir) - if e != nil { - exports[p.dir] = e - } - } - } - - // Construct source file. - var buf bytes.Buffer - fmt.Fprint(&buf, pkgIndexHead) - fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex) - fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports) - src := buf.Bytes() - - // Replace main.pkg type name with pkg. - src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1) - // Replace actual GOROOT with "/go". - src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1) - // Add some line wrapping. - src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1) - src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1) - - var err error - src, err = format.Source(src) - if err != nil { - log.Fatal(err) - } - - // Write out source file. - err = ioutil.WriteFile("pkgindex.go", src, 0644) - if err != nil { - log.Fatal(err) - } -} - -const pkgIndexHead = `package imports - -func init() { - pkgIndexOnce.Do(func() { - pkgIndex.m = pkgIndexMaster - }) - loadExports = func(dir string) map[string]bool { - return exportsMaster[dir] - } -} -` - -type pkg struct { - importpath string // full pkg import path, e.g. 
"net/http" - dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt" -} - -var fset = token.NewFileSet() - -func loadPkg(root, importpath string) { - shortName := path.Base(importpath) - if shortName == "testdata" { - return - } - - dir := filepath.Join(root, importpath) - pkgIndex[shortName] = append(pkgIndex[shortName], pkg{ - importpath: importpath, - dir: dir, - }) - - pkgDir, err := os.Open(dir) - if err != nil { - return - } - children, err := pkgDir.Readdir(-1) - pkgDir.Close() - if err != nil { - return - } - for _, child := range children { - name := child.Name() - if name == "" { - continue - } - if c := name[0]; c == '.' || ('0' <= c && c <= '9') { - continue - } - if child.IsDir() { - loadPkg(root, filepath.Join(importpath, name)) - } - } -} - -func loadExports(dir string) map[string]bool { - exports := make(map[string]bool) - buildPkg, err := build.ImportDir(dir, 0) - if err != nil { - if strings.Contains(err.Error(), "no buildable Go source files in") { - return nil - } - log.Printf("could not import %q: %v", dir, err) - return nil - } - for _, file := range buildPkg.GoFiles { - f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0) - if err != nil { - log.Printf("could not parse %q: %v", file, err) - continue - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports[name] = true - } - } - } - return exports -} diff --git a/vendor/golang.org/x/tools/imports/mkstdlib.go b/vendor/golang.org/x/tools/imports/mkstdlib.go deleted file mode 100644 index 5e53378fc8..0000000000 --- a/vendor/golang.org/x/tools/imports/mkstdlib.go +++ /dev/null @@ -1,111 +0,0 @@ -// +build ignore - -// mkstdlib generates the zstdlib.go file, containing the Go standard -// library API symbols. It's baked into the binary to avoid scanning -// GOPATH in the common case. -package main - -import ( - "bufio" - "bytes" - "fmt" - "go/format" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "runtime" - "sort" - "strings" -) - -func mustOpen(name string) io.Reader { - f, err := os.Open(name) - if err != nil { - log.Fatal(err) - } - return f -} - -func api(base string) string { - return filepath.Join(runtime.GOROOT(), "api", base) -} - -var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`) - -var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true} - -func main() { - var buf bytes.Buffer - outf := func(format string, args ...interface{}) { - fmt.Fprintf(&buf, format, args...) - } - outf("// Code generated by mkstdlib.go. 
DO NOT EDIT.\n\n") - outf("package imports\n") - outf("var stdlib = map[string]map[string]bool{\n") - f := io.MultiReader( - mustOpen(api("go1.txt")), - mustOpen(api("go1.1.txt")), - mustOpen(api("go1.2.txt")), - mustOpen(api("go1.3.txt")), - mustOpen(api("go1.4.txt")), - mustOpen(api("go1.5.txt")), - mustOpen(api("go1.6.txt")), - mustOpen(api("go1.7.txt")), - mustOpen(api("go1.8.txt")), - mustOpen(api("go1.9.txt")), - mustOpen(api("go1.10.txt")), - mustOpen(api("go1.11.txt")), - ) - sc := bufio.NewScanner(f) - - pkgs := map[string]map[string]bool{ - "unsafe": unsafeSyms, - } - paths := []string{"unsafe"} - - for sc.Scan() { - l := sc.Text() - has := func(v string) bool { return strings.Contains(l, v) } - if has("struct, ") || has("interface, ") || has(", method (") { - continue - } - if m := sym.FindStringSubmatch(l); m != nil { - path, sym := m[1], m[2] - - if _, ok := pkgs[path]; !ok { - pkgs[path] = map[string]bool{} - paths = append(paths, path) - } - pkgs[path][sym] = true - } - } - if err := sc.Err(); err != nil { - log.Fatal(err) - } - sort.Strings(paths) - for _, path := range paths { - outf("\t%q: map[string]bool{\n", path) - pkg := pkgs[path] - var syms []string - for sym := range pkg { - syms = append(syms, sym) - } - sort.Strings(syms) - for _, sym := range syms { - outf("\t\t%q: true,\n", sym) - } - outf("},\n") - } - outf("}\n") - fmtbuf, err := format.Source(buf.Bytes()) - if err != nil { - log.Fatal(err) - } - err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/golang.org/x/tools/imports/mod.go b/vendor/golang.org/x/tools/imports/mod.go index ec769145cc..018c43ce84 100644 --- a/vendor/golang.org/x/tools/imports/mod.go +++ b/vendor/golang.org/x/tools/imports/mod.go @@ -24,6 +24,7 @@ import ( type moduleResolver struct { env *fixEnv + initialized bool main *moduleJSON modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path... modsByDir []*moduleJSON // ...or Dir. @@ -48,7 +49,7 @@ type moduleErrorJSON struct { } func (r *moduleResolver) init() error { - if r.main != nil { + if r.initialized { return nil } stdout, err := r.env.invokeGo("list", "-m", "-json", "...") @@ -87,6 +88,7 @@ func (r *moduleResolver) init() error { return count(j) < count(i) // descending order }) + r.initialized = true return nil } @@ -202,7 +204,9 @@ func (r *moduleResolver) scan(_ references) ([]*pkg, error) { // Walk GOROOT, GOPATH/pkg/mod, and the main module. 
roots := []gopathwalk.Root{ {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, - {r.main.Dir, gopathwalk.RootCurrentModule}, + } + if r.main != nil { + roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) } for _, p := range filepath.SplitList(r.env.GOPATH) { roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) diff --git a/vendor/golang.org/x/tools/imports/zstdlib.go b/vendor/golang.org/x/tools/imports/zstdlib.go index ca4f0b2f21..d81b8c5307 100644 --- a/vendor/golang.org/x/tools/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/imports/zstdlib.go @@ -112,6 +112,7 @@ var stdlib = map[string]map[string]bool{ "Reader": true, "Repeat": true, "Replace": true, + "ReplaceAll": true, "Runes": true, "Split": true, "SplitAfter": true, @@ -441,6 +442,9 @@ var stdlib = map[string]map[string]bool{ "RequireAnyClientCert": true, "Server": true, "SignatureScheme": true, + "TLS_AES_128_GCM_SHA256": true, + "TLS_AES_256_GCM_SHA384": true, + "TLS_CHACHA20_POLY1305_SHA256": true, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": true, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": true, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": true, @@ -469,6 +473,7 @@ var stdlib = map[string]map[string]bool{ "VersionTLS10": true, "VersionTLS11": true, "VersionTLS12": true, + "VersionTLS13": true, "X25519": true, "X509KeyPair": true, }, @@ -1835,6 +1840,7 @@ var stdlib = map[string]map[string]bool{ "R_PPC_UADDR32": true, "R_RISCV": true, "R_RISCV_32": true, + "R_RISCV_32_PCREL": true, "R_RISCV_64": true, "R_RISCV_ADD16": true, "R_RISCV_ADD32": true, @@ -2260,6 +2266,7 @@ var stdlib = map[string]map[string]bool{ "IMAGE_FILE_MACHINE_AMD64": true, "IMAGE_FILE_MACHINE_ARM": true, "IMAGE_FILE_MACHINE_ARM64": true, + "IMAGE_FILE_MACHINE_ARMNT": true, "IMAGE_FILE_MACHINE_EBC": true, "IMAGE_FILE_MACHINE_I386": true, "IMAGE_FILE_MACHINE_IA64": true, @@ -2753,6 +2760,7 @@ var stdlib = map[string]map[string]bool{ "New": true, "Note": true, "Package": true, + "PreserveAST": true, "Synopsis": true, "ToHTML": true, "ToText": true, @@ -2764,9 +2772,10 @@ var stdlib = map[string]map[string]bool{ "Source": true, }, "go/importer": map[string]bool{ - "Default": true, - "For": true, - "Lookup": true, + "Default": true, + "For": true, + "ForCompiler": true, + "Lookup": true, }, "go/parser": map[string]bool{ "AllErrors": true, @@ -3296,6 +3305,7 @@ var stdlib = map[string]map[string]bool{ "SeekEnd": true, "SeekStart": true, "Seeker": true, + "StringWriter": true, "TeeReader": true, "WriteCloser": true, "WriteSeeker": true, @@ -3498,6 +3508,12 @@ var stdlib = map[string]map[string]bool{ "Word": true, }, "math/bits": map[string]bool{ + "Add": true, + "Add32": true, + "Add64": true, + "Div": true, + "Div32": true, + "Div64": true, "LeadingZeros": true, "LeadingZeros16": true, "LeadingZeros32": true, @@ -3508,6 +3524,9 @@ var stdlib = map[string]map[string]bool{ "Len32": true, "Len64": true, "Len8": true, + "Mul": true, + "Mul32": true, + "Mul64": true, "OnesCount": true, "OnesCount16": true, "OnesCount32": true, @@ -3527,6 +3546,9 @@ var stdlib = map[string]map[string]bool{ "RotateLeft32": true, "RotateLeft64": true, "RotateLeft8": true, + "Sub": true, + "Sub32": true, + "Sub64": true, "TrailingZeros": true, "TrailingZeros16": true, "TrailingZeros32": true, @@ -3870,6 +3892,7 @@ var stdlib = map[string]map[string]bool{ "StatusTeapot": true, "StatusTemporaryRedirect": true, "StatusText": true, + "StatusTooEarly": true, "StatusTooManyRequests": true, "StatusUnauthorized": true, 
"StatusUnavailableForLegalReasons": true, @@ -4140,6 +4163,7 @@ var stdlib = map[string]map[string]bool{ "Truncate": true, "Unsetenv": true, "UserCacheDir": true, + "UserHomeDir": true, }, "os/exec": map[string]bool{ "Cmd": true, @@ -4244,6 +4268,7 @@ var stdlib = map[string]map[string]bool{ "MakeMapWithSize": true, "MakeSlice": true, "Map": true, + "MapIter": true, "MapOf": true, "Method": true, "New": true, @@ -4419,9 +4444,12 @@ var stdlib = map[string]map[string]bool{ "Version": true, }, "runtime/debug": map[string]bool{ + "BuildInfo": true, "FreeOSMemory": true, "GCStats": true, + "Module": true, "PrintStack": true, + "ReadBuildInfo": true, "ReadGCStats": true, "SetGCPercent": true, "SetMaxStack": true, @@ -4547,6 +4575,7 @@ var stdlib = map[string]map[string]bool{ "Reader": true, "Repeat": true, "Replace": true, + "ReplaceAll": true, "Replacer": true, "Split": true, "SplitAfter": true, @@ -6105,6 +6134,7 @@ var stdlib = map[string]map[string]bool{ "FreeLibrary": true, "Fsid": true, "Fstat": true, + "Fstatat": true, "Fstatfs": true, "Fstore_t": true, "Fsync": true, @@ -9362,6 +9392,7 @@ var stdlib = map[string]map[string]bool{ "Syscall": true, "Syscall12": true, "Syscall15": true, + "Syscall18": true, "Syscall6": true, "Syscall9": true, "Sysctl": true, @@ -9630,6 +9661,7 @@ var stdlib = map[string]map[string]bool{ "TransmitFile": true, "TransmitFileBuffers": true, "Truncate": true, + "UNIX_PATH_MAX": true, "USAGE_MATCH_TYPE_AND": true, "USAGE_MATCH_TYPE_OR": true, "UTF16FromString": true, @@ -9751,6 +9783,29 @@ var stdlib = map[string]map[string]bool{ "XP1_UNI_RECV": true, "XP1_UNI_SEND": true, }, + "syscall/js": map[string]bool{ + "Error": true, + "Func": true, + "FuncOf": true, + "Global": true, + "Null": true, + "Type": true, + "TypeBoolean": true, + "TypeFunction": true, + "TypeNull": true, + "TypeNumber": true, + "TypeObject": true, + "TypeString": true, + "TypeSymbol": true, + "TypeUndefined": true, + "TypedArray": true, + "TypedArrayOf": true, + "Undefined": true, + "Value": true, + "ValueError": true, + "ValueOf": true, + "Wrapper": true, + }, "testing": map[string]bool{ "AllocsPerRun": true, "B": true, diff --git a/vendor/github.com/appscode/jsonpatch/LICENSE b/vendor/gomodules.xyz/jsonpatch/v2/LICENSE similarity index 100% rename from vendor/github.com/appscode/jsonpatch/LICENSE rename to vendor/gomodules.xyz/jsonpatch/v2/LICENSE diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.mod b/vendor/gomodules.xyz/jsonpatch/v2/go.mod new file mode 100644 index 0000000000..b5eaf830eb --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.mod @@ -0,0 +1,9 @@ +module gomodules.xyz/jsonpatch/v2 + +go 1.12 + +require ( + github.com/evanphx/json-patch v4.5.0+incompatible + github.com/pkg/errors v0.8.1 // indirect + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/gomodules.xyz/jsonpatch/v2/go.sum b/vendor/gomodules.xyz/jsonpatch/v2/go.sum new file mode 100644 index 0000000000..d8f9ffe1c9 --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/appscode/jsonpatch/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go similarity index 97% rename from vendor/github.com/appscode/jsonpatch/jsonpatch.go rename to vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index 3e698949e5..e7cb7d6da5 100644 --- a/vendor/github.com/appscode/jsonpatch/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -58,8 +58,8 @@ func NewPatch(operation, path string, value interface{}) Operation { // // An error will be returned if any of the two documents are invalid. func CreatePatch(a, b []byte) ([]Operation, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} + var aI interface{} + var bI interface{} err := json.Unmarshal(a, &aI) if err != nil { return nil, errBadJSONDoc @@ -68,7 +68,7 @@ func CreatePatch(a, b []byte) ([]Operation, error) { if err != nil { return nil, errBadJSONDoc } - return diff(aI, bI, "", []Operation{}) + return handleValues(aI, bI, "", []Operation{}) } // Returns true if the values matches (must be json types) @@ -326,7 +326,7 @@ func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Ope return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) } - p2, _ := handleValues(s[j-1], t[j-1], makePath(p, i-1), []Operation{}) + p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) } if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS deleted file mode 100644 index f73b725745..0000000000 --- a/vendor/google.golang.org/api/AUTHORS +++ /dev/null @@ -1,10 +0,0 @@ -# This is the official list of authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. -Google Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS deleted file mode 100644 index fe55ebff07..0000000000 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ /dev/null @@ -1,55 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). 
-# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# https://cla.developers.google.com/about/google-individual -# https://cla.developers.google.com/about/google-corporate -# -# The CLA can be filled out on the web: -# -# https://cla.developers.google.com/ -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name -# -# An entry with two email addresses specifies that the -# first address should be used in the submit logs and -# that the second address should be recognized as the -# same person when interacting with Rietveld. - -# Please keep the list sorted. - -Alain Vongsouvanhalainv -Andrew Gerrand -Brad Fitzpatrick -Eric Koleda -Francesc Campoy -Garrick Evans -Glenn Lewis -Ivan Krasin -Jason Hall -Johan Euphrosine -Kostik Shtoyk -Kunpei Sakai -Matthew Whisenhunt -Michael McGreevy -Nick Craig-Wood -Robbie Trencheny -Ross Light -Sarah Adams -Scott Van Woudenberg -Takashi Matsuo diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE deleted file mode 100644 index 263aa7a0c1..0000000000 --- a/vendor/google.golang.org/api/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go deleted file mode 100644 index c553271190..0000000000 --- a/vendor/google.golang.org/api/support/bundler/bundler.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package bundler supports bundling (batching) of items. Bundling amortizes an -// action with fixed costs over multiple items. For example, if an API provides -// an RPC that accepts a list of items as input, but clients would prefer -// adding items one at a time, then a Bundler can accept individual items from -// the client and bundle many of them into a single RPC. -// -// This package is experimental and subject to change without notice. -package bundler - -import ( - "context" - "errors" - "math" - "reflect" - "sync" - "time" - - "golang.org/x/sync/semaphore" -) - -const ( - DefaultDelayThreshold = time.Second - DefaultBundleCountThreshold = 10 - DefaultBundleByteThreshold = 1e6 // 1M - DefaultBufferedByteLimit = 1e9 // 1G -) - -var ( - // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. - ErrOverflow = errors.New("bundler reached buffered byte limit") - - // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. - ErrOversizedItem = errors.New("item size exceeds bundle byte limit") -) - -// A Bundler collects items added to it into a bundle until the bundle -// exceeds a given size, then calls a user-provided function to handle the bundle. -type Bundler struct { - // Starting from the time that the first message is added to a bundle, once - // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. - DelayThreshold time.Duration - - // Once a bundle has this many items, handle the bundle. Since only one - // item at a time is added to a bundle, no bundle will exceed this - // threshold, so it also serves as a limit. The default is - // DefaultBundleCountThreshold. - BundleCountThreshold int - - // Once the number of bytes in current bundle reaches this threshold, handle - // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, - // but does not cap the total size of a bundle. - BundleByteThreshold int - - // The maximum size of a bundle, in bytes. Zero means unlimited. - BundleByteLimit int - - // The maximum number of bytes that the Bundler will keep in memory before - // returning ErrOverflow. The default is DefaultBufferedByteLimit. - BufferedByteLimit int - - // The maximum number of handler invocations that can be running at once. - // The default is 1. - HandlerLimit int - - handler func(interface{}) // called to handle a bundle - itemSliceZero reflect.Value // nil (zero value) for slice of items - flushTimer *time.Timer // implements DelayThreshold - - mu sync.Mutex - sem *semaphore.Weighted // enforces BufferedByteLimit - semOnce sync.Once - curBundle bundle // incoming items added to this bundle - - // Each bundle is assigned a unique ticket that determines the order in which the - // handler is called. The ticket is assigned with mu locked, but waiting for tickets - // to be handled is done via mu2 and cond, below. 
- nextTicket uint64 // next ticket to be assigned - - mu2 sync.Mutex - cond *sync.Cond - nextHandled uint64 // next ticket to be handled - - // In this implementation, active uses space proportional to HandlerLimit, and - // waitUntilAllHandled takes time proportional to HandlerLimit each time an acquire - // or release occurs, so large values of HandlerLimit max may cause performance - // issues. - active map[uint64]bool // tickets of bundles actively being handled -} - -type bundle struct { - items reflect.Value // slice of item type - size int // size in bytes of all items -} - -// NewBundler creates a new Bundler. -// -// itemExample is a value of the type that will be bundled. For example, if you -// want to create bundles of *Entry, you could pass &Entry{} for itemExample. -// -// handler is a function that will be called on each bundle. If itemExample is -// of type T, the argument to handler is of type []T. handler is always called -// sequentially for each bundle, and never in parallel. -// -// Configure the Bundler by setting its thresholds and limits before calling -// any of its methods. -func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { - b := &Bundler{ - DelayThreshold: DefaultDelayThreshold, - BundleCountThreshold: DefaultBundleCountThreshold, - BundleByteThreshold: DefaultBundleByteThreshold, - BufferedByteLimit: DefaultBufferedByteLimit, - HandlerLimit: 1, - - handler: handler, - itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), - active: map[uint64]bool{}, - } - b.curBundle.items = b.itemSliceZero - b.cond = sync.NewCond(&b.mu2) - return b -} - -func (b *Bundler) initSemaphores() { - // Create the semaphores lazily, because the user may set limits - // after NewBundler. - b.semOnce.Do(func() { - b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) - }) -} - -// Add adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -// -// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then -// the item can never be handled. Add returns ErrOversizedItem in this case. -// -// If adding the item would exceed the maximum memory allowed -// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for -// memory, Add returns ErrOverflow. -// -// Add never blocks. -func (b *Bundler) Add(item interface{}, size int) error { - // If this item exceeds the maximum size of a bundle, - // we can never send it. - if b.BundleByteLimit > 0 && size > b.BundleByteLimit { - return ErrOversizedItem - } - // If adding this item would exceed our allotted memory - // footprint, we can't accept it. - // (TryAcquire also returns false if anything is waiting on the semaphore, - // so calls to Add and AddWait shouldn't be mixed.) - b.initSemaphores() - if !b.sem.TryAcquire(int64(size)) { - return ErrOverflow - } - b.add(item, size) - return nil -} - -// add adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -func (b *Bundler) add(item interface{}, size int) { - b.mu.Lock() - defer b.mu.Unlock() - - // If adding this item to the current bundle would cause it to exceed the - // maximum bundle size, close the current bundle and start a new one. - if b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit { - b.startFlushLocked() - } - // Add the item. 
- b.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item)) - b.curBundle.size += size - - // Start a timer to flush the item if one isn't already running. - // startFlushLocked clears the timer and closes the bundle at the same time, - // so we only allocate a new timer for the first item in each bundle. - // (We could try to call Reset on the timer instead, but that would add a lot - // of complexity to the code just to save one small allocation.) - if b.flushTimer == nil { - b.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush) - } - - // If the current bundle equals the count threshold, close it. - if b.curBundle.items.Len() == b.BundleCountThreshold { - b.startFlushLocked() - } - // If the current bundle equals or exceeds the byte threshold, close it. - if b.curBundle.size >= b.BundleByteThreshold { - b.startFlushLocked() - } -} - -// AddWait adds item to the current bundle. It marks the bundle for handling and -// starts a new one if any of the thresholds or limits are exceeded. -// -// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then -// the item can never be handled. AddWait returns ErrOversizedItem in this case. -// -// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), -// AddWait blocks until space is available or ctx is done. -// -// Calls to Add and AddWait should not be mixed on the same Bundler. -func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { - // If this item exceeds the maximum size of a bundle, - // we can never send it. - if b.BundleByteLimit > 0 && size > b.BundleByteLimit { - return ErrOversizedItem - } - // If adding this item would exceed our allotted memory footprint, block - // until space is available. The semaphore is FIFO, so there will be no - // starvation. - b.initSemaphores() - if err := b.sem.Acquire(ctx, int64(size)); err != nil { - return err - } - // Here, we've reserved space for item. Other goroutines can call AddWait - // and even acquire space, but no one can take away our reservation - // (assuming sem.Release is used correctly). So there is no race condition - // resulting from locking the mutex after sem.Acquire returns. - b.add(item, size) - return nil -} - -// Flush invokes the handler for all remaining items in the Bundler and waits -// for it to return. -func (b *Bundler) Flush() { - b.mu.Lock() - b.startFlushLocked() - // Here, all bundles with tickets < b.nextTicket are - // either finished or active. Those are the ones - // we want to wait for. - t := b.nextTicket - b.mu.Unlock() - b.initSemaphores() - b.waitUntilAllHandled(t) -} - -func (b *Bundler) startFlushLocked() { - if b.flushTimer != nil { - b.flushTimer.Stop() - b.flushTimer = nil - } - if b.curBundle.items.Len() == 0 { - return - } - // Here, both semaphores must have been initialized. - bun := b.curBundle - b.curBundle = bundle{items: b.itemSliceZero} - ticket := b.nextTicket - b.nextTicket++ - go func() { - defer func() { - b.sem.Release(int64(bun.size)) - b.release(ticket) - }() - b.acquire(ticket) - b.handler(bun.items.Interface()) - }() -} - -// acquire blocks until ticket is the next to be served, then returns. In order for N -// acquire calls to return, the tickets must be in the range [0, N). A ticket must -// not be presented to acquire more than once. 
-func (b *Bundler) acquire(ticket uint64) { - b.mu2.Lock() - defer b.mu2.Unlock() - if ticket < b.nextHandled { - panic("bundler: acquire: arg too small") - } - for !(ticket == b.nextHandled && len(b.active) < b.HandlerLimit) { - b.cond.Wait() - } - // Here, - // ticket == b.nextHandled: the caller is the next one to be handled; - // and len(b.active) < b.HandlerLimit: there is space available. - b.active[ticket] = true - b.nextHandled++ - // Broadcast, not Signal: although at most one acquire waiter can make progress, - // there might be waiters in waitUntilAllHandled. - b.cond.Broadcast() -} - -// If a ticket is used for a call to acquire, it must later be passed to release. A -// ticket must not be presented to release more than once. -func (b *Bundler) release(ticket uint64) { - b.mu2.Lock() - defer b.mu2.Unlock() - if !b.active[ticket] { - panic("bundler: release: not an active ticket") - } - delete(b.active, ticket) - b.cond.Broadcast() -} - -// waitUntilAllHandled blocks until all tickets < n have called release, meaning -// all bundles with tickets < n have been handled. -func (b *Bundler) waitUntilAllHandled(n uint64) { - // Proof of correctness of this function. - // "N is acquired" means acquire(N) has returned. - // "N is released" means release(N) has returned. - // 1. If N is acquired, N-1 is acquired. - // Follows from the loop test in acquire, and the fact - // that nextHandled is incremented by 1. - // 2. If nextHandled >= N, then N-1 is acquired. - // Because we only increment nextHandled to N after N-1 is acquired. - // 3. If nextHandled >= N, then all n < N is acquired. - // Follows from #1 and #2. - // 4. If N is acquired and N is not in active, then N is released. - // Because we put N in active before acquire returns, and only - // remove it when it is released. - // Let min(active) be the smallest member of active, or infinity if active is empty. - // 5. If nextHandled >= N and N <= min(active), then all n < N is released. - // From nextHandled >= N and #3, all n < N is acquired. - // N <= min(active) implies n < min(active) for all n < N. So all n < N is not in active. - // So from #4, all n < N is released. - // The loop test below is the antecedent of #5. - b.mu2.Lock() - defer b.mu2.Unlock() - for !(b.nextHandled >= n && n <= min(b.active)) { - b.cond.Wait() - } -} - -// min returns the minimum value of the set s, or the largest uint64 if -// s is empty. -func min(s map[uint64]bool) uint64 { - var m uint64 = math.MaxUint64 - for n := range s { - if n < m { - m = n - } - } - return m -} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 70ffe89d5e..0000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.8.x - env: GOAPP=true - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md deleted file mode 100644 index ffc2985208..0000000000 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ /dev/null @@ -1,90 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. Get the package: - - `go get -d google.golang.org/appengine` -1. 
Change into the checked out source: - - `cd $GOPATH/src/google.golang.org/appengine` -1. Fork the repo. -1. Set your fork as a remote: - - `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git` -1. Make changes, commit to your fork. -1. Send a pull request with your changes. - The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request. - -# Testing - -## Running system tests - -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - -Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. - -Run tests with `goapp test`: - -``` -goapp test -v google.golang.org/appengine/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. 
- -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index d86768a2c6..0000000000 --- a/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Go App Engine packages - -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) - -This repository supports the Go runtime on *App Engine standard*. -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [GitHub's issue -tracker](https://github.com/golang/appengine/issues). - -## Upgrading an App Engine app to the flexible environment - -This package does not work on *App Engine flexible*. - -There are many differences between the App Engine standard environment and -the flexible environment. - -See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading). - -## Directory structure - -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). - -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating from legacy (`import "appengine"`) packages - -If you're currently using the bare `appengine` packages -(that is, not these ones, imported via `google.golang.org/appengine`), -then you can use the `aefix` tool to help automate an upgrade to these packages. - -Run `go get google.golang.org/appengine/cmd/aefix` to install it. - -### 1. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. - -### 2. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and there are some differences: - -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. 
-* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. - Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the - feature you require is not present in the new - [blobstore package](https://google.golang.org/appengine/blobstore). -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. - Use the standard `net` package instead. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index 0cca033d37..0000000000 --- a/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package appengine provides basic functionality for Google App Engine. -// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine // import "google.golang.org/appengine" - -import ( - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// The gophers party all night; the rabbits provide the beats. - -// Main is the principal entry point for an app running in App Engine. -// -// On App Engine Flexible it installs a trivial health checker if one isn't -// already registered, and starts listening on port 8080 (overridden by the -// $PORT environment variable). -// -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// On App Engine Standard it ensures the server has started and is prepared to -// receive requests. -// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. -func Main() { - internal.Main() -} - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// IsStandard reports whether the App Engine app is running in the standard -// environment. This includes both the first generation runtimes (<= Go 1.9) -// and the second generation runtimes (>= Go 1.11). -func IsStandard() bool { - return internal.IsStandard() -} - -// IsFlex reports whether the App Engine app is running in the flexible environment. -func IsFlex() bool { - return internal.IsFlex() -} - -// IsAppEngine reports whether the App Engine app is running on App Engine, in either -// the standard or flexible environment. -func IsAppEngine() bool { - return internal.IsAppEngine() -} - -// IsSecondGen reports whether the App Engine app is running on the second generation -// runtimes (>= Go 1.11). -func IsSecondGen() bool { - return internal.IsSecondGen() -} - -// NewContext returns a context for an in-flight HTTP request. 
-// This function is cheap. -func NewContext(req *http.Request) context.Context { - return internal.ReqContext(req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index f4b645aad3..0000000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772e2a..0000000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. 
-func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod deleted file mode 100644 index f449359d2f..0000000000 --- a/vendor/google.golang.org/appengine/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module google.golang.org/appengine - -require ( - github.com/golang/protobuf v1.2.0 - golang.org/x/net v0.0.0-20180724234803-3673e40ba225 - golang.org/x/text v0.3.0 -) diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum deleted file mode 100644 index 1a221c0896..0000000000 --- a/vendor/google.golang.org/appengine/go.sum +++ /dev/null @@ -1,6 +0,0 @@ -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f361..0000000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). -func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. 
-// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. -func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). -func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. 
-func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 9a2ff77ab5..0000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} -func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - 
return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} -} - -type AppIdentityServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} -func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} -} -func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) -} -func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) -} -func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) -} -func (m *AppIdentityServiceError) XXX_Size() int { - return xxx_messageInfo_AppIdentityServiceError.Size(m) -} -func (m *AppIdentityServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} -func (*SignForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} -} -func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) -} -func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) -} -func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppRequest.Merge(dst, src) -} -func (m *SignForAppRequest) XXX_Size() int { - return xxx_messageInfo_SignForAppRequest.Size(m) -} -func (m *SignForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} -func (*SignForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} -} -func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) 
-} -func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) -} -func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppResponse.Merge(dst, src) -} -func (m *SignForAppResponse) XXX_Size() int { - return xxx_messageInfo_SignForAppResponse.Size(m) -} -func (m *SignForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} -func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} -} -func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) -} -func (m *GetPublicCertificateForAppRequest) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) -} -func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} -func (*PublicCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} -} -func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) -} -func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) -} -func (dst *PublicCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublicCertificate.Merge(dst, src) -} -func (m *PublicCertificate) XXX_Size() int { - return xxx_messageInfo_PublicCertificate.Size(m) -} -func (m *PublicCertificate) XXX_DiscardUnknown() { - 
xxx_messageInfo_PublicCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} -func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} -} -func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) -} -func (m *GetPublicCertificateForAppResponse) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) -} -func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} -func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} -} -func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) -} -func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) -} -func (m *GetServiceAccountNameRequest) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) -} -func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} -func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} -} -func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) -} -func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) -} -func (m *GetServiceAccountNameResponse) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) -} -func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} -func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} -} -func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) -} -func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) -} -func (m *GetAccessTokenRequest) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenRequest.Size(m) -} -func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo - -func (m *GetAccessTokenRequest) GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} -func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} -} -func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) -} -func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) -} -func (m *GetAccessTokenResponse) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenResponse.Size(m) -} -func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} -func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) -} -func (m *GetDefaultGcsBucketNameRequest) 
XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} -func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { - proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") - proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") - proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") - proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") - proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") - proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") - proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") - proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") - proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") - proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") - proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") - proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) -} - -var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ - // 676 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, - 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 
0x1e, - 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, - 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, - 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, - 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, - 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c, - 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, - 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, - 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, - 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, - 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, - 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, - 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, - 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, - 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, - 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, - 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, - 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, - 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, - 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, - 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, - 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, - 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, - 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, - 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, - 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, - 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, - 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, - 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, - 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, - 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, - 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, - 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, - 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, - 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, - 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, - 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, - 0x56, 0x99, 0xc3, 
0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, - 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, - 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, - 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, - 0xf3, 0x04, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca5b7..0000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index ddfc0c04a1..0000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,786 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/modules/modules_service.proto - -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} -func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} -} - -type ModulesServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} -func (*ModulesServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} -} -func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) -} -func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) -} -func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModulesServiceError.Merge(dst, src) -} -func (m *ModulesServiceError) XXX_Size() int { - return xxx_messageInfo_ModulesServiceError.Size(m) -} -func (m *ModulesServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo - -type GetModulesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} -func (*GetModulesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} -} -func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) -} -func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) -} -func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesRequest.Merge(dst, src) -} -func (m *GetModulesRequest) XXX_Size() int { - return xxx_messageInfo_GetModulesRequest.Size(m) -} -func (m *GetModulesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} -func (*GetModulesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} -} -func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) -} -func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) -} -func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesResponse.Merge(dst, src) -} -func (m *GetModulesResponse) XXX_Size() int { - return xxx_messageInfo_GetModulesResponse.Size(m) -} -func (m *GetModulesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} -func (*GetVersionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} -} -func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) -} -func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsRequest.Merge(dst, src) -} -func (m *GetVersionsRequest) XXX_Size() int { - return xxx_messageInfo_GetVersionsRequest.Size(m) -} -func (m *GetVersionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { 
return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} -func (*GetVersionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} -} -func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) -} -func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) -} -func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsResponse.Merge(dst, src) -} -func (m *GetVersionsResponse) XXX_Size() int { - return xxx_messageInfo_GetVersionsResponse.Size(m) -} -func (m *GetVersionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} -func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} -} -func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) -} -func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) -} -func (m *GetDefaultVersionRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionRequest.Size(m) -} -func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} -func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} -} -func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) -} -func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) -} -func (m *GetDefaultVersionResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionResponse.Size(m) -} -func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} -func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} -} -func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) -} -func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) -} -func (m *GetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesRequest.Size(m) -} -func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} -func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} -} -func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) -} -func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) -} -func (m *GetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesResponse.Size(m) -} -func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo - -func (m 
*GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} -func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} -} -func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) -} -func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) -} -func (m *SetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesRequest.Size(m) -} -func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} -func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} -} -func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) -} -func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) -} -func (m *SetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesResponse.Size(m) -} -func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo - -type StartModuleRequest struct { - Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} -func (*StartModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} -} -func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) -} -func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleRequest.Merge(dst, src) -} -func (m *StartModuleRequest) XXX_Size() int { - return xxx_messageInfo_StartModuleRequest.Size(m) -} -func (m *StartModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} -func (*StartModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} -} -func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) -} -func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleResponse.Merge(dst, src) -} -func (m *StartModuleResponse) XXX_Size() int { - return xxx_messageInfo_StartModuleResponse.Size(m) -} -func (m *StartModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} -func (*StopModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} -} -func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) -} -func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleRequest.Merge(dst, src) -} -func (m 
*StopModuleRequest) XXX_Size() int { - return xxx_messageInfo_StopModuleRequest.Size(m) -} -func (m *StopModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} -func (*StopModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} -} -func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) -} -func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleResponse.Merge(dst, src) -} -func (m *StopModuleResponse) XXX_Size() int { - return xxx_messageInfo_StopModuleResponse.Size(m) -} -func (m *StopModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} -func (*GetHostnameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} -} -func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) -} -func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) -} -func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameRequest.Merge(dst, src) -} -func (m *GetHostnameRequest) XXX_Size() int { - return xxx_messageInfo_GetHostnameRequest.Size(m) -} -func (m *GetHostnameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string 
`protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} -func (*GetHostnameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} -} -func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) -} -func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) -} -func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameResponse.Merge(dst, src) -} -func (m *GetHostnameResponse) XXX_Size() int { - return xxx_messageInfo_GetHostnameResponse.Size(m) -} -func (m *GetHostnameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { - proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") - proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") - proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") - proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") - proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") - proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest") - proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") - proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") - proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") - proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") - proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") - proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") - proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") - proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") - proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") - proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") - proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) -} - -var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, - 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, - 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, - 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, - 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 
0xac, 0xc0, 0x59, 0x2e, - 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, - 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, - 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, - 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, - 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, - 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, - 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, - 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, - 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, - 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, - 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, - 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, - 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, - 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, - 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, - 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, - 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, - 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, - 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, - 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, - 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, - 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, - 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f0065a2..0000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional 
string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca082..0000000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a992a..0000000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46e..0000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045b..0000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... 
-go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/google.golang.org/genproto/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go deleted file mode 100644 index 7bfe37a3db..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/rpc/status.proto - -package status // import "google.golang.org/genproto/googleapis/rpc/status" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The `Status` type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. It is used by -// [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error message, -// and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The -// error message should be a developer-facing English message that helps -// developers *understand* and *resolve* the error. If a localized user-facing -// error message is needed, put the localized message in the error details or -// localize it in the client. The optional error details may contain arbitrary -// information about the error. There is a predefined set of error detail types -// in the package `google.rpc` that can be used for common error conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. 
When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. -type Status struct { - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. 
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_status_c6e4de62dcdf2edf, []int{0} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (dst *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(dst, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Status) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*Status)(nil), "google.rpc.Status") -} - -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c6e4de62dcdf2edf) } - -var fileDescriptor_status_c6e4de62dcdf2edf = []byte{ - // 209 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, - 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, - 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, - 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, - 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, - 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, - 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, - 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, - 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, - 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, - 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, - 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, - 0x00, -} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index f443eec9a0..0000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: go - -matrix: - include: - - go: 1.11.x - env: VET=1 GO111MODULE=on - - go: 1.11.x - env: RACE=1 GO111MODULE=on - - go: 1.11.x - env: RUN386=1 - - go: 1.11.x - env: GRPC_GO_RETRY=on - - go: 1.10.x - - go: 1.9.x - - go: 1.9.x - env: GAE=1 - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then 
export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi - - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi - - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi - - make test diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS deleted file mode 100644 index e491a9e7f7..0000000000 --- a/vendor/google.golang.org/grpc/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md deleted file mode 100644 index 0863eb26b6..0000000000 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ /dev/null @@ -1,36 +0,0 @@ -# How to contribute - -We definitely welcome your patches and contributions to gRPC! - -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) - -## Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). - -## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly. - -- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. - -- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). - -- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. - -- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. - -- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. - -- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). - -- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). - -- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. 
- - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode - -- Exceptions to the rules can be made if there's a compelling reason for doing so. - diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/google.golang.org/grpc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 41a754f977..0000000000 --- a/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,60 +0,0 @@ -all: vet test testrace testappengine - -build: deps - go build google.golang.org/grpc/... - -clean: - go clean -i google.golang.org/grpc/... - -deps: - go get -d -v google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go generate google.golang.org/grpc/... - -test: testdeps - go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappengine: testappenginedeps - goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -testappenginedeps: - goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -testrace: testdeps - go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... - -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -vet: vetdeps - ./vet.sh - -vetdeps: - ./vet.sh -install - -.PHONY: \ - all \ - build \ - clean \ - deps \ - proto \ - test \ - testappengine \ - testappenginedeps \ - testdeps \ - testrace \ - updatedeps \ - updatetestdeps \ - vet \ - vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index e3fb3c75ae..0000000000 --- a/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# gRPC-Go - -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) - -The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. - -Installation ------------- - -To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: - -``` -$ go get -u google.golang.org/grpc -``` - -Prerequisites -------------- - -gRPC-Go requires Go 1.9 or later. - -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. 
If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - -Documentation -------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). - -Performance ------------ -See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). - -Status ------- -General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). - -FAQ ---- - -#### Compiling error, undefined: grpc.SupportPackageIsVersion - -Please update proto package, gRPC package and rebuild the proto files: - - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - - `go get -u google.golang.org/grpc` - - `protoc --go_out=plugins=grpc:. *.proto` - -#### How to turn on logging - -The default logger is controlled by the environment variables. Turn everything -on by setting: - -``` -GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info -``` - -#### The RPC failed with error `"code = Unavailable desc = transport is closing"` - -This error means the connection the RPC is using was closed, and there are many -possible reasons, including: - 1. mis-configured transport credentials, connection failed on handshaking - 1. bytes disrupted, possibly by a proxy in between - 1. server shutdown - -It can be tricky to debug this because the error happens on the client side but -the root cause of the connection being closed is on the server side. Turn on -logging on __both client and server__, and see if there are any transport -errors. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go deleted file mode 100644 index fa31565fd2..0000000000 --- a/vendor/google.golang.org/grpc/backoff.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// See internal/backoff package for the backoff implementation. This file is -// kept for the exported types and API backward compatility. - -package grpc - -import ( - "time" -) - -// DefaultBackoffConfig uses values specified for backoff in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -var DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, -} - -// BackoffConfig defines the parameters for the default gRPC backoff strategy. -type BackoffConfig struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration -} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index a78e702bae..0000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" -) - -// Address represents a server the client connects to. -// -// Deprecated: please use package balancer. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerConfig specifies the configurations for Balancer. -// -// Deprecated: please use package balancer. -type BalancerConfig struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// BalancerGetOptions configures a Get call. -// -// Deprecated: please use package balancer. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// -// Deprecated: please use package balancer. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string, config BalancerConfig) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage of the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. 
- // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -// -// Deprecated: please use package balancer/roundrobin. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Errorln("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. 
- open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - select { - case <-rr.addrCh: - default: - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string, config BalancerConfig) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address, 1) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. -func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = status.Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. 
- if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). -type pickFirst struct { - *roundRobin -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go deleted file mode 100644 index 317c2e728e..0000000000 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ /dev/null @@ -1,303 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package balancer defines APIs for load balancing in gRPC. -// All APIs in this package are experimental. -package balancer - -import ( - "context" - "errors" - "net" - "strings" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" -) - -var ( - // m is a map from name to balancer builder. - m = make(map[string]Builder) -) - -// Register registers the balancer builder to the balancer map. b.Name -// (lowercased) will be used as the name registered with this builder. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Balancers are -// registered with the same name, the one registered last will take effect. -func Register(b Builder) { - m[strings.ToLower(b.Name())] = b -} - -// unregisterForTesting deletes the balancer with the given name from the -// balancer map. -// -// This function is not thread-safe. -func unregisterForTesting(name string) { - delete(m, name) -} - -func init() { - internal.BalancerUnregister = unregisterForTesting -} - -// Get returns the resolver builder registered with the given name. -// Note that the compare is done in a case-insenstive fashion. -// If no builder is register with the name, nil will be returned. -func Get(name string) Builder { - if b, ok := m[strings.ToLower(name)]; ok { - return b - } - return nil -} - -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. -// -// The reconnect backoff will be applied on the list, not a single address. 
-// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. -// -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() -} - -// NewSubConnOptions contains options to create new SubConn. -type NewSubConnOptions struct { - // CredsBundle is the credentials bundle that will be used in the created - // SubConn. If it's nil, the original creds from grpc DialOptions will be - // used. - CredsBundle credentials.Bundle - // HealthCheckEnabled indicates whether health check service should be - // enabled on this SubConn - HealthCheckEnabled bool -} - -// ClientConn represents a gRPC ClientConn. -// -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. -type ClientConn interface { - // NewSubConn is called by balancer to create a new SubConn. - // It doesn't block and wait for the connections to be established. - // Behaviors of the SubConn can be controlled by options. - NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) - // RemoveSubConn removes the SubConn from ClientConn. - // The SubConn will be shutdown. - RemoveSubConn(SubConn) - - // UpdateBalancerState is called by balancer to nofity gRPC that some internal - // state in balancer has changed. - // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - UpdateBalancerState(s connectivity.State, p Picker) - - // ResolveNow is called by balancer to notify gRPC to do a name resolving. - ResolveNow(resolver.ResolveNowOption) - - // Target returns the dial target for this ClientConn. - Target() string -} - -// BuildOptions contains additional information for Build. -type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. - CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. 
- Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 -} - -// Builder creates a balancer. -type Builder interface { - // Build creates a new balancer with the ClientConn. - Build(cc ClientConn, opts BuildOptions) Balancer - // Name returns the name of balancers built by this builder. - // It will be used to pick balancers (for example in service config). - Name() string -} - -// PickOptions contains addition information for the Pick operation. -type PickOptions struct { - // FullMethodName is the method name that NewClientStream() is called - // with. The canonical format is /service/Method. - FullMethodName string - // Header contains the metadata from the RPC's client header. The metadata - // should not be modified; make a copy first if needed. - Header metadata.MD -} - -// DoneInfo contains additional information for done. -type DoneInfo struct { - // Err is the rpc error the RPC finished with. It could be nil. - Err error - // Trailer contains the metadata from the RPC's trailer, if present. - Trailer metadata.MD - // BytesSent indicates if any bytes have been sent to the server. - BytesSent bool - // BytesReceived indicates if any byte has been received from the server. - BytesReceived bool -} - -var ( - // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). - ErrNoSubConnAvailable = errors.New("no SubConn is available") - // ErrTransientFailure indicates all SubConns are in TransientFailure. - // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - ErrTransientFailure = errors.New("all SubConns are in TransientFailure") -) - -// Picker is used by gRPC to pick a SubConn to send an RPC. -// Balancer is expected to generate a new picker from its snapshot every time its -// internal state has changed. -// -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). -type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). - // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). - // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will block - // until UpdateBalancerState() is called and will call pick on the new picker. - // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with unavailable error. - // - // The returned done() function will be called once the rpc has finished, with the - // final status of that RPC. - // done may be nil if balancer doesn't care about the RPC status. 
- Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) -} - -// Balancer takes input from gRPC, manages SubConns, and collects and aggregates -// the connectivity states. -// -// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. -// -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. -type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. - // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. - HandleResolvedAddrs([]resolver.Address, error) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} - -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. -} - -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. -// -// Idle and Shutdown are not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure -} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go deleted file mode 100644 index 245785e7a2..0000000000 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ /dev/null @@ -1,171 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package base - -import ( - "context" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type baseBuilder struct { - name string - pickerBuilder PickerBuilder - config Config -} - -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &baseBalancer{ - cc: cc, - pickerBuilder: bb.pickerBuilder, - - subConns: make(map[resolver.Address]balancer.SubConn), - scStates: make(map[balancer.SubConn]connectivity.State), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - // Initialize picker to a picker that always return - // ErrNoSubConnAvailable, because when state of a SubConn changes, we - // may call UpdateBalancerState with this picker. - picker: NewErrPicker(balancer.ErrNoSubConnAvailable), - config: bb.config, - } -} - -func (bb *baseBuilder) Name() string { - return bb.name -} - -type baseBalancer struct { - cc balancer.ClientConn - pickerBuilder PickerBuilder - - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State - - subConns map[resolver.Address]balancer.SubConn - scStates map[balancer.SubConn]connectivity.State - picker balancer.Picker - config Config -} - -func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) - return - } - grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs) - // addrsSet is the set converted from addrs, it's used for quick lookup of an address. - addrsSet := make(map[resolver.Address]struct{}) - for _, a := range addrs { - addrsSet[a] = struct{}{} - if _, ok := b.subConns[a]; !ok { - // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) - if err != nil { - grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) - continue - } - b.subConns[a] = sc - b.scStates[sc] = connectivity.Idle - sc.Connect() - } - } - for a, sc := range b.subConns { - // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(sc) - delete(b.subConns, a) - // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. - } - } -} - -// regeneratePicker takes a snapshot of the balancer, and generates a picker -// from it. The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. -func (b *baseBalancer) regeneratePicker() { - if b.state == connectivity.TransientFailure { - b.picker = NewErrPicker(balancer.ErrTransientFailure) - return - } - readySCs := make(map[resolver.Address]balancer.SubConn) - - // Filter out all ready SCs from full subConn map. 
- for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc - } - } - b.picker = b.pickerBuilder.Build(readySCs) -} - -func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) - oldS, ok := b.scStates[sc] - if !ok { - grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) - return - } - b.scStates[sc] = s - switch s { - case connectivity.Idle: - sc.Connect() - case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. - delete(b.scStates, sc) - } - - oldAggrState := b.state - b.state = b.csEvltr.RecordTransition(oldS, s) - - // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure - if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - b.regeneratePicker() - } - - b.cc.UpdateBalancerState(b.state, b.picker) -} - -// Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. -func (b *baseBalancer) Close() { -} - -// NewErrPicker returns a picker that always returns err on Pick(). -func NewErrPicker(err error) balancer.Picker { - return &errPicker{err: err} -} - -type errPicker struct { - err error // Pick() always returns this err. -} - -func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err -} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go deleted file mode 100644 index 34b1f2994a..0000000000 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package base defines a balancer base that can be used to build balancers with -// different picking algorithms. -// -// The base balancer creates a new SubConn for each resolved address. The -// provided picker will only be notified about READY SubConns. -// -// This package is the base of round_robin balancer, its purpose is to be used -// to build round_robin like balancers with complex picking algorithms. -// Balancers with more complicated logic should try to implement a balancer -// builder from scratch. -// -// All APIs in this package are experimental. 
-package base - -import ( - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/resolver" -) - -// PickerBuilder creates balancer.Picker. -type PickerBuilder interface { - // Build takes a slice of ready SubConns, and returns a picker that will be - // used by gRPC to pick a SubConn. - Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker -} - -// NewBalancerBuilder returns a balancer builder. The balancers -// built by this builder will use the picker builder to build pickers. -func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) -} - -// Config contains the config info about the base balancer builder. -type Config struct { - // HealthCheck indicates whether health checking should be enabled for this specific balancer. - HealthCheck bool -} - -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. -func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { - return &baseBuilder{ - name: name, - pickerBuilder: pb, - config: config, - } -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go deleted file mode 100644 index 57aea9fb4d..0000000000 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is -// installed as one of the default balancers in gRPC, users don't need to -// explicitly install this balancer. -package roundrobin - -import ( - "context" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// Name is the name of round_robin balancer. -const Name = "round_robin" - -// newBuilder creates a new roundrobin balancer builder. -func newBuilder() balancer.Builder { - return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) -} - -func init() { - balancer.Register(newBuilder()) -} - -type rrPickerBuilder struct{} - -func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { - grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) - var scs []balancer.SubConn - for _, sc := range readySCs { - scs = append(scs, sc) - } - return &rrPicker{ - subConns: scs, - } -} - -type rrPicker struct { - // subConns is the snapshot of the roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. 
- subConns []balancer.SubConn - - mu sync.Mutex - next int -} - -func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - if len(p.subConns) <= 0 { - return nil, nil, balancer.ErrNoSubConnAvailable - } - - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() - return sc, nil, nil -} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go deleted file mode 100644 index 7233ade293..0000000000 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ /dev/null @@ -1,328 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State -} - -// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. -// TODO make a general purpose buffer that uses interface{}. -type scStateUpdateBuffer struct { - c chan *scStateUpdate - mu sync.Mutex - backlog []*scStateUpdate -} - -func newSCStateUpdateBuffer() *scStateUpdateBuffer { - return &scStateUpdateBuffer{ - c: make(chan *scStateUpdate, 1), - } -} - -func (b *scStateUpdateBuffer) put(t *scStateUpdate) { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) == 0 { - select { - case b.c <- t: - return - default: - } - } - b.backlog = append(b.backlog, t) -} - -func (b *scStateUpdateBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that the scStateUpdate will be sent to. -// -// Upon receiving, the caller should call load to send another -// scStateChangeTuple onto the channel if there is any. -func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { - return b.c -} - -// resolverUpdate contains the new resolved addresses or error if there's -// any. -type resolverUpdate struct { - addrs []resolver.Address - err error -} - -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. 
-type ccBalancerWrapper struct { - cc *ClientConn - balancer balancer.Balancer - stateChangeQueue *scStateUpdateBuffer - resolverUpdateCh chan *resolverUpdate - done chan struct{} - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} -} - -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { - ccb := &ccBalancerWrapper{ - cc: cc, - stateChangeQueue: newSCStateUpdateBuffer(), - resolverUpdateCh: make(chan *resolverUpdate, 1), - done: make(chan struct{}), - subConns: make(map[*acBalancerWrapper]struct{}), - } - go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - return ccb -} - -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case t := <-ccb.stateChangeQueue.get(): - ccb.stateChangeQueue.load() - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) - case t := <-ccb.resolverUpdateCh: - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) - case <-ccb.done: - } - - select { - case <-ccb.done: - ccb.balancer.Close() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - return - default: - } - } -} - -func (ccb *ccBalancerWrapper) close() { - close(ccb.done) -} - -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.stateChangeQueue.put(&scStateUpdate{ - sc: sc, - state: s, - }) -} - -func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { - if ccb.cc.curBalancerName != grpclbName { - var containsGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - containsGRPCLB = true - break - } - } - if containsGRPCLB { - // The current balancer is not grpclb, but addresses contain grpclb - // address. This means we failed to switch to grpclb, most likely - // because grpclb is not registered. Filter out all grpclb addresses - // from addrs before sending to balancer. 
- tempAddrs := make([]resolver.Address, 0, len(addrs)) - for _, a := range addrs { - if a.Type != resolver.GRPCLB { - tempAddrs = append(tempAddrs, a) - } - } - addrs = tempAddrs - } - } - select { - case <-ccb.resolverUpdateCh: - default: - } - ccb.resolverUpdateCh <- &resolverUpdate{ - addrs: addrs, - err: err, - } -} - -func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } - ac, err := ccb.cc.newAddrConn(addrs, opts) - if err != nil { - return nil, err - } - acbw := &acBalancerWrapper{ac: ac} - acbw.ac.mu.Lock() - ac.acbw = acbw - acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} - return acbw, nil -} - -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) -} - -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - // Update picker before updating state. Even though the ordering here does - // not matter, it can lead to multiple calls of Pick in the common start-up - // case where we wait for ready and then perform an RPC. If the picker is - // updated later, we could call the "connecting" picker when the state is - // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(p) - ccb.cc.csMgr.updateState(s) -} - -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { - ccb.cc.resolveNow(o) -} - -func (ccb *ccBalancerWrapper) Target() string { - return ccb.cc.target -} - -// acBalancerWrapper is a wrapper on top of ac for balancers. -// It implements balancer.SubConn interface. -type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn -} - -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. 
- acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) - - if acState == connectivity.Shutdown { - return - } - - ac, err := cc.newAddrConn(addrs, opts) - if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - if acState != connectivity.Idle { - ac.connect() - } - } -} - -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - acbw.ac.connect() -} - -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index 42b60feaa6..0000000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,326 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. -} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - targetAddr := cc.Target() - targetSplitted := strings.Split(targetAddr, ":///") - if len(targetSplitted) >= 2 { - targetAddr = targetSplitted[1] - } - - bwb.b.Start(targetAddr, BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - targetAddr: targetAddr, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateBalancerState(connectivity.Idle, bw) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - targetAddr string // Target without the scheme. - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. - // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} - - // To aggregate the connectivity state. 
- csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. -func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. - a := resolver.Address{ - Addr: bw.targetAddr, - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.targetAddr}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - oldSC.UpdateAddresses(newAddrs) - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. - del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.RecordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateBalancerState(bw.state, bw) - if s == connectivity.Shutdown { - // Remove state for this sc. - delete(bw.connSt, sc) - } -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() -} - -// The picker is the balancerWrapper itself. -// Pick should never return ErrNoSubConnAvailable. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - failfast := true // Default failfast is true. - if ss, ok := rpcInfoFromContext(ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return nil, nil, err - } - var done func(balancer.DoneInfo) - if p != nil { - done = func(i balancer.DoneInfo) { p() } - } - var sc balancer.SubConn - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. - for _, sc = range bw.conns { - break - } - } else { - var ok bool - sc, ok = bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - if !ok && failfast { - return nil, nil, balancer.ErrTransientFailure - } - if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { - // If the returned sc is not ready and RPC is failfast, - // return error, and this RPC will fail. - return nil, nil, balancer.ErrTransientFailure - } - } - - return sc, done, nil -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go deleted file mode 100644 index f393bb6618..0000000000 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ /dev/null @@ -1,900 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import duration "github.com/golang/protobuf/ptypes/duration" -import timestamp "github.com/golang/protobuf/ptypes/timestamp" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Enumerates the type of event -// Note the terminology is different from the RPC semantics -// definition, but the same meaning is expressed here. -type GrpcLogEntry_EventType int32 - -const ( - GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 - // Header sent from client to server - GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 - // Header sent from server to client - GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 - // Message sent from client to server - GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 - // Message sent from server to client - GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 - // A signal that client is done sending - GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 - // Trailer indicates the end of the RPC. - // On client side, this event means a trailer was either received - // from the network or the gRPC library locally generated a status - // to inform the application about a failure. - // On server side, this event means the server application requested - // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after - // this due to races on server side. - GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 - // A signal that the RPC is cancelled. On client side, this - // indicates the client application requests a cancellation. - // On server side, this indicates that cancellation was detected. - // Note: This marks the end of the RPC. Events may arrive after - // this due to races. For example, on client side a trailer - // may arrive even though the application requested to cancel the RPC. 
- GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 -) - -var GrpcLogEntry_EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNKNOWN", - 1: "EVENT_TYPE_CLIENT_HEADER", - 2: "EVENT_TYPE_SERVER_HEADER", - 3: "EVENT_TYPE_CLIENT_MESSAGE", - 4: "EVENT_TYPE_SERVER_MESSAGE", - 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", - 6: "EVENT_TYPE_SERVER_TRAILER", - 7: "EVENT_TYPE_CANCEL", -} -var GrpcLogEntry_EventType_value = map[string]int32{ - "EVENT_TYPE_UNKNOWN": 0, - "EVENT_TYPE_CLIENT_HEADER": 1, - "EVENT_TYPE_SERVER_HEADER": 2, - "EVENT_TYPE_CLIENT_MESSAGE": 3, - "EVENT_TYPE_SERVER_MESSAGE": 4, - "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, - "EVENT_TYPE_SERVER_TRAILER": 6, - "EVENT_TYPE_CANCEL": 7, -} - -func (x GrpcLogEntry_EventType) String() string { - return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) -} -func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} -} - -// Enumerates the entity that generates the log entry -type GrpcLogEntry_Logger int32 - -const ( - GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 - GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 - GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 -) - -var GrpcLogEntry_Logger_name = map[int32]string{ - 0: "LOGGER_UNKNOWN", - 1: "LOGGER_CLIENT", - 2: "LOGGER_SERVER", -} -var GrpcLogEntry_Logger_value = map[string]int32{ - "LOGGER_UNKNOWN": 0, - "LOGGER_CLIENT": 1, - "LOGGER_SERVER": 2, -} - -func (x GrpcLogEntry_Logger) String() string { - return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) -} -func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} -} - -type Address_Type int32 - -const ( - Address_TYPE_UNKNOWN Address_Type = 0 - // address is in 1.2.3.4 form - Address_TYPE_IPV4 Address_Type = 1 - // address is in IPv6 canonical form (RFC5952 section 4) - // The scope is NOT included in the address string. - Address_TYPE_IPV6 Address_Type = 2 - // address is UDS string - Address_TYPE_UNIX Address_Type = 3 -) - -var Address_Type_name = map[int32]string{ - 0: "TYPE_UNKNOWN", - 1: "TYPE_IPV4", - 2: "TYPE_IPV6", - 3: "TYPE_UNIX", -} -var Address_Type_value = map[string]int32{ - "TYPE_UNKNOWN": 0, - "TYPE_IPV4": 1, - "TYPE_IPV6": 2, - "TYPE_UNIX": 3, -} - -func (x Address_Type) String() string { - return proto.EnumName(Address_Type_name, int32(x)) -} -func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} -} - -// Log entry we store in binary logs -type GrpcLogEntry struct { - // The timestamp of the binary log message - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Uniquely identifies a call. The value must not be 0 in order to disambiguate - // from an unset value. - // Each call may have several log entries, they will all have the same call_id. - // Nothing is guaranteed about their value other than they are unique across - // different RPCs in the same gRPC process. - CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` - // The entry sequence id for this call. The first GrpcLogEntry has a - // value of 1, to disambiguate from an unset value. The purpose of - // this field is to detect missing entries in environments where - // durability or ordering is not guaranteed. 
- SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` - Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` - Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` - // The logger uses one of the following fields to record the payload, - // according to the type of the log entry. - // - // Types that are valid to be assigned to Payload: - // *GrpcLogEntry_ClientHeader - // *GrpcLogEntry_ServerHeader - // *GrpcLogEntry_Message - // *GrpcLogEntry_Trailer - Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` - // true if payload does not represent the full message or metadata. - PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` - // Peer address information, will only be recorded on the first - // incoming event. On client side, peer is logged on - // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in - // the case of trailers-only. On server side, peer is always - // logged on EVENT_TYPE_CLIENT_HEADER. - Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } -func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } -func (*GrpcLogEntry) ProtoMessage() {} -func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} -} -func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) -} -func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) -} -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) -} -func (m *GrpcLogEntry) XXX_Size() int { - return xxx_messageInfo_GrpcLogEntry.Size(m) -} -func (m *GrpcLogEntry) XXX_DiscardUnknown() { - xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo - -func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *GrpcLogEntry) GetCallId() uint64 { - if m != nil { - return m.CallId - } - return 0 -} - -func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { - if m != nil { - return m.SequenceIdWithinCall - } - return 0 -} - -func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { - if m != nil { - return m.Type - } - return GrpcLogEntry_EVENT_TYPE_UNKNOWN -} - -func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { - if m != nil { - return m.Logger - } - return GrpcLogEntry_LOGGER_UNKNOWN -} - -type isGrpcLogEntry_Payload interface { - isGrpcLogEntry_Payload() -} - -type GrpcLogEntry_ClientHeader struct { - ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` -} - -type GrpcLogEntry_ServerHeader struct { - ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` -} - -type GrpcLogEntry_Message struct { - Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` -} - -type GrpcLogEntry_Trailer 
struct { - Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` -} - -func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} - -func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} - -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader - } - return nil -} - -func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader - } - return nil -} - -func (m *GrpcLogEntry) GetMessage() *Message { - if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message - } - return nil -} - -func (m *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer - } - return nil -} - -func (m *GrpcLogEntry) GetPayloadTruncated() bool { - if m != nil { - return m.PayloadTruncated - } - return false -} - -func (m *GrpcLogEntry) GetPeer() *Address { - if m != nil { - return m.Peer - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ - (*GrpcLogEntry_ClientHeader)(nil), - (*GrpcLogEntry_ServerHeader)(nil), - (*GrpcLogEntry_Message)(nil), - (*GrpcLogEntry_Trailer)(nil), - } -} - -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil -} - -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // 
payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, err - default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ClientHeader struct { - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The name of the RPC method, which looks something like: - // // - // Note the leading "/" character. - MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` - // A single process may be used to run multiple virtual - // servers with different identities. - // The authority is the name of such a server identitiy. - // It is typically a portion of the URI in the form of - // or : . - Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` - // the RPC timeout - Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ClientHeader) Reset() { *m = ClientHeader{} } -func (m *ClientHeader) String() string { return proto.CompactTextString(m) } -func (*ClientHeader) ProtoMessage() {} -func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} -} -func (m *ClientHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ClientHeader.Unmarshal(m, b) -} -func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) -} -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) -} -func (m *ClientHeader) XXX_Size() int { - return xxx_messageInfo_ClientHeader.Size(m) -} -func (m *ClientHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ClientHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientHeader proto.InternalMessageInfo - -func (m *ClientHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *ClientHeader) GetMethodName() string { - if m != nil { - return m.MethodName - } - return "" -} - -func (m *ClientHeader) GetAuthority() string { - if m != nil { - return m.Authority - } - return "" -} - -func (m *ClientHeader) GetTimeout() *duration.Duration { - if m != nil { - return m.Timeout - } - return nil -} - -type ServerHeader struct { - // This contains only the metadata from the application. 
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServerHeader) Reset() { *m = ServerHeader{} } -func (m *ServerHeader) String() string { return proto.CompactTextString(m) } -func (*ServerHeader) ProtoMessage() {} -func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} -} -func (m *ServerHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServerHeader.Unmarshal(m, b) -} -func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) -} -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) -} -func (m *ServerHeader) XXX_Size() int { - return xxx_messageInfo_ServerHeader.Size(m) -} -func (m *ServerHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ServerHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_ServerHeader proto.InternalMessageInfo - -func (m *ServerHeader) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -type Trailer struct { - // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // The gRPC status code. - StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` - // An original status message before any transport specific - // encoding. - StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` - // The value of the 'grpc-status-details-bin' metadata key. If - // present, this is always an encoded 'google.rpc.Status' message. - StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Trailer) Reset() { *m = Trailer{} } -func (m *Trailer) String() string { return proto.CompactTextString(m) } -func (*Trailer) ProtoMessage() {} -func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} -} -func (m *Trailer) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Trailer.Unmarshal(m, b) -} -func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) -} -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) -} -func (m *Trailer) XXX_Size() int { - return xxx_messageInfo_Trailer.Size(m) -} -func (m *Trailer) XXX_DiscardUnknown() { - xxx_messageInfo_Trailer.DiscardUnknown(m) -} - -var xxx_messageInfo_Trailer proto.InternalMessageInfo - -func (m *Trailer) GetMetadata() *Metadata { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Trailer) GetStatusCode() uint32 { - if m != nil { - return m.StatusCode - } - return 0 -} - -func (m *Trailer) GetStatusMessage() string { - if m != nil { - return m.StatusMessage - } - return "" -} - -func (m *Trailer) GetStatusDetails() []byte { - if m != nil { - return m.StatusDetails - } - return nil -} - -// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE -type Message struct { - // Length of the message. 
It may not be the same as the length of the - // data field, as the logging payload can be truncated or omitted. - Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` - // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Message.Unmarshal(m, b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) -} -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) -} -func (m *Message) XXX_Size() int { - return xxx_messageInfo_Message.Size(m) -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo - -func (m *Message) GetLength() uint32 { - if m != nil { - return m.Length - } - return 0 -} - -func (m *Message) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -// A list of metadata pairs, used in the payload of client header, -// server header, and server trailer. -// Implementations may omit some entries to honor the header limits -// of GRPC_BINARY_LOG_CONFIG. -// -// Header keys added by gRPC are omitted. To be more specific, -// implementations will not log the following entries, and this is -// not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials -// -// Implementations must always log grpc-trace-bin if it is present. -// Practically speaking it will only be visible on server side because -// grpc-trace-bin is managed by low level client side mechanisms -// inaccessible from the application level. On server side, the -// header is just a normal metadata key. -// The pair will not count towards the size limit. 
-type Metadata struct { - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metadata.Unmarshal(m, b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) -} -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) -} -func (m *Metadata) XXX_Size() int { - return xxx_messageInfo_Metadata.Size(m) -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetEntry() []*MetadataEntry { - if m != nil { - return m.Entry - } - return nil -} - -// A metadata key value pair -type MetadataEntry struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } -func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } -func (*MetadataEntry) ProtoMessage() {} -func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} -} -func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) -} -func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) -} -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) -} -func (m *MetadataEntry) XXX_Size() int { - return xxx_messageInfo_MetadataEntry.Size(m) -} -func (m *MetadataEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MetadataEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo - -func (m *MetadataEntry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *MetadataEntry) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -// Address information -type Address struct { - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Address) Reset() { *m = Address{} } -func (m *Address) String() string { return proto.CompactTextString(m) } -func (*Address) ProtoMessage() {} -func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} -} -func (m *Address) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Address.Unmarshal(m, b) -} -func (m *Address) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Address.Marshal(b, m, deterministic) -} -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) -} -func (m *Address) XXX_Size() int { - return xxx_messageInfo_Address.Size(m) -} -func (m *Address) XXX_DiscardUnknown() { - xxx_messageInfo_Address.DiscardUnknown(m) -} - -var xxx_messageInfo_Address proto.InternalMessageInfo - -func (m *Address) GetType() Address_Type { - if m != nil { - return m.Type - } - return Address_TYPE_UNKNOWN -} - -func (m *Address) GetAddress() string { - if m != nil { - return m.Address - } - return "" -} - -func (m *Address) GetIpPort() uint32 { - if m != nil { - return m.IpPort - } - return 0 -} - -func init() { - proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") - proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") - proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") - proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") - proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") - proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") - proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") - proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) -} - -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) -} - -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 
0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 
0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go deleted file mode 100644 index 100f05dc74..0000000000 --- a/vendor/google.golang.org/grpc/call.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" -) - -// Invoke sends the RPC request on the wire and returns after response is -// received. This is typically called by generated code. -// -// All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - // allow interceptor to see all applicable call options, which means those - // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) - - if cc.dopts.unaryInt != nil { - return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) - } - return invoke(ctx, method, args, reply, cc, opts...) -} - -func combine(o1 []CallOption, o2 []CallOption) []CallOption { - // we don't use append because o1 could have extra capacity whose - // elements would be overwritten, which could cause inadvertent - // sharing (and race connditions) between concurrent calls - if len(o1) == 0 { - return o2 - } else if len(o2) == 0 { - return o1 - } - ret := make([]CallOption, len(o1)+len(o2)) - copy(ret, o1) - copy(ret[len(o1):], o2) - return ret -} - -// Invoke sends the RPC request on the wire and returns after response is -// received. This is typically called by generated code. -// -// DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { - return cc.Invoke(ctx, method, args, reply, opts...) -} - -var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} - -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { - cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) - if err != nil { - return err - } - if err := cs.SendMsg(req); err != nil { - return err - } - return cs.RecvMsg(reply) -} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go deleted file mode 100644 index 56d0bf713d..0000000000 --- a/vendor/google.golang.org/grpc/clientconn.go +++ /dev/null @@ -1,1449 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "fmt" - "math" - "net" - "reflect" - "strings" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/balancer" - _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. - _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. - "google.golang.org/grpc/status" -) - -const ( - // minimum time to give a connection to complete - minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" -) - -var ( - // ErrClientConnClosing indicates that the operation is illegal because - // the ClientConn is closing. - // - // Deprecated: this error should not be relied upon by users; use the status - // code of Canceled instead. - ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") - // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. - errConnDrain = errors.New("grpc: the connection is drained") - // errConnClosing indicates that the connection is closing. - errConnClosing = errors.New("grpc: the connection is closing") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") - // We use an accessor so that minConnectTimeout can be - // atomically read and updated while testing. - getMinConnectTimeout = func() time.Duration { - return minConnectTimeout - } -) - -// The following errors are returned from Dial and DialContext -var ( - // errNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicitly - // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // errTransportCredsAndBundle indicates that creds bundle is used together - // with other individual Transport Credentials. - errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., oauth2 token) which requires secure connection on an insecure - // connection. 
- errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") -) - -const ( - defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 - defaultClientMaxSendMessageSize = math.MaxInt32 - // http2IOBufSize specifies the buffer size for sending frames. - defaultWriteBufSize = 32 * 1024 - defaultReadBufSize = 32 * 1024 -) - -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } - cc.retryThrottler.Store((*retryThrottler)(nil)) - cc.ctx, cc.cancel = context.WithCancel(context.Background()) - - for _, opt := range opts { - opt.apply(&cc.dopts) - } - - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtINFO, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) - } - cc.csMgr.channelzID = cc.channelzID - } - - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } - } - - cc.mkp = cc.dopts.copts.KeepaliveParams - - if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = newProxyDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - }, - ) - } - - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA - } - - if cc.dopts.timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) - defer cancel() - } - - defer func() { - select { - case <-ctx.Done(): - conn, err = nil, ctx.Err() - default: - } - - if err != nil { - cc.Close() - } - }() - - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = sc - scSet = true - } - default: - } - } - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.Exponential{ - MaxDelay: DefaultBackoffConfig.MaxDelay, - } - } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parse target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original unparsed target. 
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} - } - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint - } - - if cc.dopts.scChan != nil && !scSet { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = sc - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - cc.balancerBuildOpts = balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - ChannelzParentID: cc.channelzID, - } - - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) - } - - cc.mu.Lock() - cc.resolverWrapper = rWrapper - cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - return nil, ctx.Err() - } - } - } - - return cc, nil -} - -// connectivityStateManager keeps the connectivity.State of ClientConn. -// This struct will eventually be exported so the balancers can access it. -type connectivityStateManager struct { - mu sync.Mutex - state connectivity.State - notifyChan chan struct{} - channelzID int64 -} - -// updateState updates the connectivity.State of ClientConn. -// If there's a change it notifies goroutines waiting on state change to -// happen. -func (csm *connectivityStateManager) updateState(state connectivity.State) { - csm.mu.Lock() - defer csm.mu.Unlock() - if csm.state == connectivity.Shutdown { - return - } - if csm.state == state { - return - } - csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } - if csm.notifyChan != nil { - // There are other goroutines waiting on this channel. - close(csm.notifyChan) - csm.notifyChan = nil - } -} - -func (csm *connectivityStateManager) getState() connectivity.State { - csm.mu.Lock() - defer csm.mu.Unlock() - return csm.state -} - -func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { - csm.mu.Lock() - defer csm.mu.Unlock() - if csm.notifyChan == nil { - csm.notifyChan = make(chan struct{}) - } - return csm.notifyChan -} - -// ClientConn represents a client connection to an RPC server. 
-type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc ServiceConfig - scRaw string - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - preBalancerName string // previous balancer name. - curAddresses []resolver.Address - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - - firstResolveEvent *grpcsync.Event - - channelzID int64 // channelz unique identification number - czData *channelzData -} - -// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or -// ctx expires. A true value is returned in former case and false in latter. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { - ch := cc.csMgr.getNotifyChan() - if cc.csMgr.getState() != sourceState { - return true - } - select { - case <-ctx.Done(): - return false - case <-ch: - return true - } -} - -// GetState returns the connectivity.State of ClientConn. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) GetState() connectivity.State { - return cc.csMgr.getState() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revist this decision in the future. - cc.sc = sc - cc.scRaw = "" - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } -} - -// waitForResolvedAddrs blocks until the resolver has provided addresses or the -// context expires. Returns nil unless the context expires first; otherwise -// returns a status error based on the context. -func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { - // This is on the RPC path, so we use a fast path to avoid the - // more-expensive "select" below after the resolver has returned once. - if cc.firstResolveEvent.HasFired() { - return nil - } - select { - case <-cc.firstResolveEvent.Done(): - return nil - case <-ctx.Done(): - return status.FromContextError(ctx.Err()).Err() - case <-cc.ctx.Done(): - return ErrClientConnClosing - } -} - -func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - // cc was closed. - return - } - - if reflect.DeepEqual(cc.curAddresses, addrs) { - return - } - - cc.curAddresses = addrs - cc.firstResolveEvent.Fire() - - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - var newBalancerName string - if isGRPCLB { - newBalancerName = grpclbName - } else { - // Address list doesn't contain grpclb address. Try to pick a - // non-grpclb balancer. - newBalancerName = cc.curBalancerName - // If current balancer is grpclb, switch to the previous one. 
- if newBalancerName == grpclbName { - newBalancerName = cc.preBalancerName - } - // The following could be true in two cases: - // - the first time handling resolved addresses - // (curBalancerName="") - // - the first time handling non-grpclb addresses - // (curBalancerName="grpclb", preBalancerName="") - if newBalancerName == "" { - newBalancerName = PickFirstBalancerName - } - } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) - } - - cc.balancerWrapper.handleResolvedAddrs(addrs, nil) -} - -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. -// -// Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if cc.conns == nil { - return - } - - if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) { - return - } - - grpclog.Infof("ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") - return - } - // TODO(bar switching) change this to two steps: drain and close. - // Keep track of sc in wrapper. - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() - } - - builder := balancer.Get(name) - // TODO(yuxuanli): If user send a service config that does not contain a valid balancer name, should - // we reuse previous one? - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } - if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() - } - - cc.preBalancerName = cc.curBalancerName - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) -} - -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s) - cc.mu.Unlock() -} - -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. -// -// Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { - ac := &addrConn{ - cc: cc, - addrs: addrs, - scopts: opts, - dopts: cc.dopts, - czData: new(channelzData), - resetBackoff: make(chan struct{}), - } - ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. 
- cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return nil, ErrClientConnClosing - } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) - } - cc.conns[ac] = struct{}{} - cc.mu.Unlock() - return ac, nil -} - -// removeAddrConn removes the addrConn in the subConn from clientConn. -// It also tears down the ac with the given error. -func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - delete(cc.conns, ac) - cc.mu.Unlock() - ac.tearDown(err) -} - -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - -// Target returns the target string of the ClientConn. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) Target() string { - return cc.target -} - -func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) -} - -func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) -} - -// connect starts creating a transport. -// It does nothing if the ac is not IDLE. -// TODO(bar) Move this to the addrConn section. -func (ac *addrConn) connect() error { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return errConnClosing - } - if ac.state != connectivity.Idle { - ac.mu.Unlock() - return nil - } - ac.updateConnectivityState(connectivity.Connecting) - ac.mu.Unlock() - - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() - return nil -} - -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// It checks whether current connected address of ac is in the new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { - ac.mu.Lock() - defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) - if ac.state == connectivity.Shutdown { - ac.addrs = addrs - return true - } - - // Unless we're busy reconnecting already, let's reconnect from the top of - // the list. - if ac.state != connectivity.Ready { - return false - } - - var curAddrFound bool - for _, a := range addrs { - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } - } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs - } - - return curAddrFound -} - -// GetMethodConfig gets the method config of the input method. -// If there's an exact match for input method (i.e. /service/method), we return -// the corresponding MethodConfig. 
-// If there isn't an exact match for the input method, we look for the default config -// under the service (i.e /service/). If there is a default MethodConfig for -// the service, we return it. -// Otherwise, we return an empty MethodConfig. -func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { - // TODO: Avoid the locking here. - cc.mu.RLock() - defer cc.mu.RUnlock() - m, ok := cc.sc.Methods[method] - if !ok { - i := strings.LastIndex(method, "/") - m = cc.sc.Methods[method[:i+1]] - } - return m -} - -func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { - cc.mu.RLock() - defer cc.mu.RUnlock() - return cc.sc.healthCheckConfig -} - -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - hdr, _ := metadata.FromOutgoingContext(ctx) - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ - FullMethodName: method, - Header: hdr, - }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil -} - -// handleServiceConfig parses the service config string in JSON format to Go native -// struct ServiceConfig, and store both the struct and the JSON string in ClientConn. -func (cc *ClientConn) handleServiceConfig(js string) error { - if cc.dopts.disableServiceConfig { - return nil - } - if cc.scRaw == js { - return nil - } - if channelz.IsOn() { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - // The special formatting of \"%s\" instead of %q is to provide nice printing of service config - // for human consumption. - Desc: fmt.Sprintf("Channel has a new service config \"%s\"", js), - Severity: channelz.CtINFO, - }) - } - sc, err := parseServiceConfig(js) - if err != nil { - return err - } - cc.mu.Lock() - // Check if the ClientConn is already closed. Some fields (e.g. - // balancerWrapper) are set to nil when closing the ClientConn, and could - // cause nil pointer panic if we don't have this check. - if cc.conns == nil { - cc.mu.Unlock() - return nil - } - cc.scRaw = js - cc.sc = sc - - if sc.retryThrottling != nil { - newThrottler := &retryThrottler{ - tokens: sc.retryThrottling.MaxTokens, - max: sc.retryThrottling.MaxTokens, - thresh: sc.retryThrottling.MaxTokens / 2, - ratio: sc.retryThrottling.TokenRatio, - } - cc.retryThrottler.Store(newThrottler) - } else { - cc.retryThrottler.Store((*retryThrottler)(nil)) - } - - if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config. - if cc.curBalancerName == grpclbName { - // If current balancer is grpclb, there's at least one grpclb - // balancer address in the resolved list. Don't switch the balancer, - // but change the previous balancer name, so if a new resolved - // address list doesn't contain grpclb address, balancer will be - // switched to *sc.LB. - cc.preBalancerName = *sc.LB - } else { - cc.switchBalancer(*sc.LB) - cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil) - } - } - - cc.mu.Unlock() - return nil -} - -func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { - cc.mu.RLock() - r := cc.resolverWrapper - cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) -} - -// ResetConnectBackoff wakes up all subchannels in transient failure and causes -// them to attempt another connection immediately. It also resets the backoff -// times used for subsequent attempts regardless of the current state. -// -// In general, this function should not be used. 
Typical service or network -// outages result in a reasonable client reconnection strategy by default. -// However, if a previously unavailable network becomes available, this may be -// used to trigger an immediate reconnect. -// -// This API is EXPERIMENTAL. -func (cc *ClientConn) ResetConnectBackoff() { - cc.mu.Lock() - defer cc.mu.Unlock() - for ac := range cc.conns { - ac.resetConnectBackoff() - } -} - -// Close tears down the ClientConn and all underlying connections. -func (cc *ClientConn) Close() error { - defer cc.cancel() - - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return ErrClientConnClosing - } - conns := cc.conns - cc.conns = nil - cc.csMgr.updateState(connectivity.Shutdown) - - rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil - bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil - cc.mu.Unlock() - - cc.blockingpicker.close() - - if rWrapper != nil { - rWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - - for ac := range conns { - ac.tearDown(ErrClientConnClosing) - } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", - Severity: channelz.CtINFO, - } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtINFO, - } - } - channelz.AddTraceEvent(cc.channelzID, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity beng deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) - } - return nil -} - -// addrConn is a network connection to a given address. -type addrConn struct { - ctx context.Context - cancel context.CancelFunc - - cc *ClientConn - dopts dialOptions - acbw balancer.SubConn - scopts balancer.NewSubConnOptions - - // transport is set when there's a viable transport (note: ac state may not be READY as LB channel - // health checking may require server to report healthy to set ac to READY), and is reset - // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway - // is received, transport is closed, ac has been torn down). - transport transport.ClientTransport // The current transport. - - mu sync.Mutex - curAddr resolver.Address // The current address. - addrs []resolver.Address // All addresses that the resolver resolved to. - - // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - - tearDownErr error // The reason this addrConn is torn down. - - backoffIdx int // Needs to be stateful for resetConnectBackoff. - resetBackoff chan struct{} - - channelzID int64 // channelz unique identification number. - czData *channelzData - healthCheckEnabled bool -} - -// Note: this requires a lock on ac.mu. -func (ac *addrConn) updateConnectivityState(s connectivity.State) { - if ac.state == s { - return - } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) - grpclog.Infof(updateMsg) - ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } - ac.cc.handleSubConnStateChange(ac.acbw, s) -} - -// adjustParams updates parameters used to create transports upon -// receiving a GoAway. 
-func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: - v := 2 * ac.dopts.copts.KeepaliveParams.Time - ac.cc.mu.Lock() - if v > ac.cc.mkp.Time { - ac.cc.mkp.Time = v - } - ac.cc.mu.Unlock() - } -} - -func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - tryNextAddrFromStart := grpcsync.NewEvent() - - ac.mu.Lock() - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOption{}) - } - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - ac.mu.Unlock() - - addrLoop: - for _, addr := range addrs { - ac.mu.Lock() - - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.Connecting) - ac.transport = nil - ac.mu.Unlock() - - // This will be the duration that dial gets to finish. - dialDuration := getMinConnectTimeout() - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - connectDeadline := time.Now().Add(dialDuration) - - ac.mu.Lock() - ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp - ac.cc.mu.RUnlock() - - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - - copts := ac.dopts.copts - if ac.scopts.CredsBundle != nil { - copts.CredsBundle = ac.scopts.CredsBundle - } - hctx, hcancel := context.WithCancel(ac.ctx) - defer hcancel() - ac.mu.Unlock() - - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } - - reconnect := grpcsync.NewEvent() - prefaceReceived := make(chan struct{}) - newTr, err := ac.createTransport(addr, copts, connectDeadline, reconnect, prefaceReceived) - if err == nil { - ac.mu.Lock() - ac.curAddr = addr - ac.transport = newTr - ac.mu.Unlock() - - healthCheckConfig := ac.cc.healthCheckConfig() - // LB channel health checking is only enabled when all the four requirements below are met: - // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption, - // 2. the internal.HealthCheckFunc is set by importing the grpc/healthcheck package, - // 3. a service config with non-empty healthCheckConfig field is provided, - // 4. the current load balancer allows it. - healthcheckManagingState := false - if !ac.cc.dopts.disableHealthCheck && healthCheckConfig != nil && ac.scopts.HealthCheckEnabled { - if ac.cc.dopts.healthCheckFunc == nil { - // TODO: add a link to the health check doc in the error message. 
- grpclog.Error("the client side LB channel health check function has not been set.") - } else { - // TODO(deklerk) refactor to just return transport - go ac.startHealthCheck(hctx, newTr, addr, healthCheckConfig.ServiceName) - healthcheckManagingState = true - } - } - if !healthcheckManagingState { - ac.mu.Lock() - ac.updateConnectivityState(connectivity.Ready) - ac.mu.Unlock() - } - } else { - hcancel() - if err == errConnClosing { - return - } - - if tryNextAddrFromStart.HasFired() { - break addrLoop - } - continue - } - - ac.mu.Lock() - reqHandshake := ac.dopts.reqHandshake - ac.mu.Unlock() - - <-reconnect.Done() - hcancel() - - if reqHandshake == envconfig.RequireHandshakeHybrid { - // In RequireHandshakeHybrid mode, we must check to see whether - // server preface has arrived yet to decide whether to start - // reconnecting at the top of the list (server preface received) - // or continue with the next addr in the list as if the - // connection were not successful (server preface not received). - select { - case <-prefaceReceived: - // We received a server preface - huzzah! We consider this - // a success and restart from the top of the addr list. - ac.mu.Lock() - ac.backoffIdx = 0 - ac.mu.Unlock() - break addrLoop - default: - // Despite having set state to READY, in hybrid mode we - // consider this a failure and continue connecting at the - // next addr in the list. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - - ac.updateConnectivityState(connectivity.TransientFailure) - ac.mu.Unlock() - - if tryNextAddrFromStart.HasFired() { - break addrLoop - } - } - } else { - // In RequireHandshakeOn mode, we would have already waited for - // the server preface, so we consider this a success and restart - // from the top of the addr list. In RequireHandshakeOff mode, - // we don't care to wait for the server preface before - // considering this a success, so we also restart from the top - // of the addr list. - ac.mu.Lock() - ac.backoffIdx = 0 - ac.mu.Unlock() - break addrLoop - } - } - - // After exhausting all addresses, or after need to reconnect after a - // READY, the addrConn enters TRANSIENT_FAILURE. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure) - - // Backoff. - b := ac.resetBackoff - timer := time.NewTimer(backoffFor) - acctx := ac.ctx - ac.mu.Unlock() - - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-acctx.Done(): - timer.Stop() - return - } - } -} - -// createTransport creates a connection to one of the backends in addrs. It -// sets ac.transport in the success case, or it returns an error if it was -// unable to successfully create a transport. -// -// If waitForHandshake is enabled, it blocks until server preface arrives. 
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time, reconnect *grpcsync.Event, prefaceReceived chan struct{}) (transport.ClientTransport, error) { - onCloseCalled := make(chan struct{}) - - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: ac.cc.authority, - } - - prefaceTimer := time.NewTimer(connectDeadline.Sub(time.Now())) - - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - reconnect.Fire() - } - - onClose := func() { - close(onCloseCalled) - prefaceTimer.Stop() - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) - prefaceTimer.Stop() - } - - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) - defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } - - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) - - if err == nil { - if ac.dopts.reqHandshake == envconfig.RequireHandshakeOn { - select { - case <-prefaceTimer.C: - // We didn't get the preface in time. - newTr.Close() - err = errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, errors.New("connection closed") - } - } else if ac.dopts.reqHandshake == envconfig.RequireHandshakeHybrid { - go func() { - select { - case <-prefaceTimer.C: - // We didn't get the preface in time. - newTr.Close() - case <-prefaceReceived: - // We got the preface just in the nick of time - huzzah! - case <-onCloseCalled: - // The transport has already closed - noop. - } - }() - } - } - - if err != nil { - // newTr is either nil, or closed. - ac.cc.blockingpicker.updateConnectionError(err) - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - - return nil, errConnClosing - } - ac.mu.Unlock() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) - return nil, err - } - - // Now there is a viable transport to be use, so set ac.transport to reflect the new viable transport. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return nil, errConnClosing - } - ac.mu.Unlock() - - // Now there is a viable transport to be use, so set ac.transport to reflect the new viable transport. 
- ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return nil, errConnClosing - } - ac.mu.Unlock() - - return newTr, nil -} - -func (ac *addrConn) startHealthCheck(ctx context.Context, newTr transport.ClientTransport, addr resolver.Address, serviceName string) { - // Set up the health check helper functions - newStream := func() (interface{}, error) { - return ac.newClientStream(ctx, &StreamDesc{ServerStreams: true}, "/grpc.health.v1.Health/Watch", newTr) - } - firstReady := true - reportHealth := func(ok bool) { - ac.mu.Lock() - defer ac.mu.Unlock() - if ac.transport != newTr { - return - } - if ok { - if firstReady { - firstReady = false - ac.curAddr = addr - } - ac.updateConnectivityState(connectivity.Ready) - } else { - ac.updateConnectivityState(connectivity.TransientFailure) - } - } - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, reportHealth, serviceName) - if err != nil { - if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") - } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) - } - } -} - -func (ac *addrConn) resetConnectBackoff() { - ac.mu.Lock() - close(ac.resetBackoff) - ac.backoffIdx = 0 - ac.resetBackoff = make(chan struct{}) - ac.mu.Unlock() -} - -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { - ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() - } - return nil, false -} - -// tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. -func (ac *addrConn) tearDown(err error) { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - curTr := ac.transport - ac.transport = nil - // We have to set the state to Shutdown before anything else to prevent races - // between setting the state and logic that waits on context cancelation / etc. - ac.updateConnectivityState(connectivity.Shutdown) - ac.cancel() - ac.tearDownErr = err - ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. 
- ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", - Severity: channelz.CtINFO, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtINFO, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity beng deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } - ac.mu.Unlock() -} - -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr - ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} - -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - -type retryThrottler struct { - max float64 - thresh float64 - ratio float64 - - mu sync.Mutex - tokens float64 // TODO(dfawley): replace with atomic and remove lock. -} - -// throttle subtracts a retry token from the pool and returns whether a retry -// should be throttled (disallowed) based upon the retry throttling policy in -// the service config. -func (rt *retryThrottler) throttle() bool { - if rt == nil { - return false - } - rt.mu.Lock() - defer rt.mu.Unlock() - rt.tokens-- - if rt.tokens < 0 { - rt.tokens = 0 - } - return rt.tokens <= rt.thresh -} - -func (rt *retryThrottler) successfulRPC() { - if rt == nil { - return - } - rt.mu.Lock() - defer rt.mu.Unlock() - rt.tokens += rt.ratio - if rt.tokens > rt.max { - rt.tokens = rt.max - } -} - -type channelzChannel struct { - cc *ClientConn -} - -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() -} - -// ErrClientConnTimeout indicates that the ClientConn cannot establish the -// underlying connections within the specified timeout. -// -// Deprecated: This error is never returned by grpc and should not be -// referenced by users. -var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go deleted file mode 100644 index 1297765478..0000000000 --- a/vendor/google.golang.org/grpc/codec.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "google.golang.org/grpc/encoding" - _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" -) - -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. -type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error -} - -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) - -// Codec defines the interface gRPC uses to encode and decode messages. -// Note that implementations of this interface must be thread safe; -// a Codec's methods can be called from concurrent goroutines. -// -// Deprecated: use encoding.Codec instead. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. This is unused by - // gRPC. - String() string -} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7c0..0000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go deleted file mode 100644 index 0b206a5782..0000000000 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package codes - -import "strconv" - -func (c Code) String() string { - switch c { - case OK: - return "OK" - case Canceled: - return "Canceled" - case Unknown: - return "Unknown" - case InvalidArgument: - return "InvalidArgument" - case DeadlineExceeded: - return "DeadlineExceeded" - case NotFound: - return "NotFound" - case AlreadyExists: - return "AlreadyExists" - case PermissionDenied: - return "PermissionDenied" - case ResourceExhausted: - return "ResourceExhausted" - case FailedPrecondition: - return "FailedPrecondition" - case Aborted: - return "Aborted" - case OutOfRange: - return "OutOfRange" - case Unimplemented: - return "Unimplemented" - case Internal: - return "Internal" - case Unavailable: - return "Unavailable" - case DataLoss: - return "DataLoss" - case Unauthenticated: - return "Unauthenticated" - default: - return "Code(" + strconv.FormatInt(int64(c), 10) + ")" - } -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go deleted file mode 100644 index d9b9d5782e..0000000000 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package codes defines the canonical error codes used by gRPC. It is -// consistent across various languages. -package codes // import "google.golang.org/grpc/codes" - -import ( - "fmt" - "strconv" -) - -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. -type Code uint32 - -const ( - // OK is returned on success. - OK Code = 0 - - // Canceled indicates the operation was canceled (typically by the caller). - Canceled Code = 1 - - // Unknown error. An example of where this error may be returned is - // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - Unknown Code = 2 - - // InvalidArgument indicates client specified an invalid argument. - // Note that this differs from FailedPrecondition. It indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - InvalidArgument Code = 3 - - // DeadlineExceeded means operation expired before completion. - // For operations that change the state of the system, this error may be - // returned even if the operation has completed successfully. For - // example, a successful response from a server could have been delayed - // long enough for the deadline to expire. - DeadlineExceeded Code = 4 - - // NotFound means some requested entity (e.g., file or directory) was - // not found. - NotFound Code = 5 - - // AlreadyExists means an attempt to create an entity failed because one - // already exists. - AlreadyExists Code = 6 - - // PermissionDenied indicates the caller does not have permission to - // execute the specified operation. 
It must not be used for rejections - // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be - // used if the caller cannot be identified (use Unauthenticated - // instead for those errors). - PermissionDenied Code = 7 - - // ResourceExhausted indicates some resource has been exhausted, perhaps - // a per-user quota, or perhaps the entire file system is out of space. - ResourceExhausted Code = 8 - - // FailedPrecondition indicates operation was rejected because the - // system is not in a state required for the operation's execution. - // For example, directory to be deleted may be non-empty, an rmdir - // operation is applied to a non-directory, etc. - // - // A litmus test that may help a service implementor in deciding - // between FailedPrecondition, Aborted, and Unavailable: - // (a) Use Unavailable if the client can retry just the failing call. - // (b) Use Aborted if the client should retry at a higher-level - // (e.g., restarting a read-modify-write sequence). - // (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, FailedPrecondition - // should be returned since the client should not retry unless - // they have first fixed up the directory by deleting files from it. - // (d) Use FailedPrecondition if the client performs conditional - // REST Get/Update/Delete on a resource and the resource on the - // server does not match the condition. E.g., conflicting - // read-modify-write on the same resource. - FailedPrecondition Code = 9 - - // Aborted indicates the operation was aborted, typically due to a - // concurrency issue like sequencer check failures, transaction aborts, - // etc. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Aborted Code = 10 - - // OutOfRange means operation was attempted past the valid range. - // E.g., seeking or reading past end of file. - // - // Unlike InvalidArgument, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate InvalidArgument if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // OutOfRange if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. We recommend using OutOfRange (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an OutOfRange error to detect when - // they are done. - OutOfRange Code = 11 - - // Unimplemented indicates operation is not implemented or not - // supported/enabled in this service. - Unimplemented Code = 12 - - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, - // something is very broken. - Internal Code = 13 - - // Unavailable indicates the service is currently unavailable. - // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Unavailable Code = 14 - - // DataLoss indicates unrecoverable data loss or corruption. - DataLoss Code = 15 - - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. 
- Unauthenticated Code = 16 - - _maxCode = 17 -) - -var strToCode = map[string]Code{ - `"OK"`: OK, - `"CANCELLED"`:/* [sic] */ Canceled, - `"UNKNOWN"`: Unknown, - `"INVALID_ARGUMENT"`: InvalidArgument, - `"DEADLINE_EXCEEDED"`: DeadlineExceeded, - `"NOT_FOUND"`: NotFound, - `"ALREADY_EXISTS"`: AlreadyExists, - `"PERMISSION_DENIED"`: PermissionDenied, - `"RESOURCE_EXHAUSTED"`: ResourceExhausted, - `"FAILED_PRECONDITION"`: FailedPrecondition, - `"ABORTED"`: Aborted, - `"OUT_OF_RANGE"`: OutOfRange, - `"UNIMPLEMENTED"`: Unimplemented, - `"INTERNAL"`: Internal, - `"UNAVAILABLE"`: Unavailable, - `"DATA_LOSS"`: DataLoss, - `"UNAUTHENTICATED"`: Unauthenticated, -} - -// UnmarshalJSON unmarshals b into the Code. -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. - if string(b) == "null" { - return nil - } - if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") - } - - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) - return nil - } - - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) -} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go deleted file mode 100644 index b1d7dbc547..0000000000 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package connectivity defines connectivity semantics. -// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. -package connectivity - -import ( - "context" - - "google.golang.org/grpc/grpclog" -) - -// State indicates the state of connectivity. -// It can be the state of a ClientConn or SubConn. -type State int - -func (s State) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - grpclog.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" - } -} - -const ( - // Idle indicates the ClientConn is idle. - Idle State = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -// Reporter reports the connectivity states. -type Reporter interface { - // CurrentState returns the current state of the reporter. 
- CurrentState() State - // WaitForStateChange blocks until the reporter's state is different from the given state, - // and returns true. - // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). - WaitForStateChange(context.Context, State) bool -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go deleted file mode 100644 index a851560456..0000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ /dev/null @@ -1,328 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package credentials implements various credentials supported by gRPC library, -// which encapsulate all the state needed by a client to authenticate with a -// server and make various assertions, e.g., about the client's identity, role, -// or whether it is authorized to make a particular call. -package credentials // import "google.golang.org/grpc/credentials" - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/credentials/internal" -) - -// alpnProtoStr are the specified application level protocols for gRPC. -var alpnProtoStr = []string{"h2"} - -// PerRPCCredentials defines the common interface for the credentials which need to -// attach security information to every RPC (e.g., oauth2). -type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. - GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentials requires - // transport security. - RequireTransportSecurity() bool -} - -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, server name, etc. -type ProtocolInfo struct { - // ProtocolVersion is the gRPC wire protocol version. - ProtocolVersion string - // SecurityProtocol is the security protocol in use. - SecurityProtocol string - // SecurityVersion is the security protocol version. - SecurityVersion string - // ServerName is the user-configured server name. - ServerName string -} - -// AuthInfo defines the common interface for the auth information the users are interested in. 
-type AuthInfo interface { - AuthType() string -} - -// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC -// and the caller should not close rawConn. -var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") - -// TransportCredentials defines the common interface for all the live gRPC wire -// protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. - // gRPC will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). - // If the returned error is a wrapper error, implementations should make sure that - // the error implements Temporary() to have the correct retry behaviors. - // - // If the returned net.Conn is closed, it MUST close the net.Conn provided. - ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) - // ServerHandshake does the authentication handshake for servers. It returns - // the authenticated connection and the corresponding auth information about - // the connection. - // - // If the returned net.Conn is closed, it MUST close the net.Conn provided. - ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportCredentials. - Info() ProtocolInfo - // Clone makes a copy of this TransportCredentials. - Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. - OverrideServerName(string) error -} - -// Bundle is a combination of TransportCredentials and PerRPCCredentials. -// -// It also contains a mode switching method, so it can be used as a combination -// of different credential policies. -// -// Bundle cannot be used together with individual TransportCredentials. -// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. -// -// This API is experimental. -type Bundle interface { - TransportCredentials() TransportCredentials - PerRPCCredentials() PerRPCCredentials - // NewWithMode should make a copy of Bundle, and switch mode. Modifying the - // existing Bundle may cause races. - // - // NewWithMode returns nil if the requested mode is not supported. - NewWithMode(mode string) (Bundle, error) -} - -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState -} - -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" -} - -// GetSecurityValue returns security info requested by channelz. -func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { - v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], - } - // Currently there's no way to get LocalCertificate info from tls package. 
- if len(t.State.PeerCertificates) > 0 { - v.RemoteCertificate = t.State.PeerCertificates[0].Raw - } - return v -} - -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config -} - -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - ServerName: c.config.ServerName, - } -} - -func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) - if cfg.ServerName == "" { - colonPos := strings.LastIndex(authority, ":") - if colonPos == -1 { - colonPos = len(authority) - } - cfg.ServerName = authority[:colonPos] - } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - }() - select { - case err := <-errChannel: - if err != nil { - return nil, nil, err - } - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - return nil, nil, err - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) Clone() TransportCredentials { - return NewTLS(c.config) -} - -func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { - c.config.ServerName = serverNameOverride - return nil -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = alpnProtoStr - return tc -} - -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key -// file for server. 
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil -} - -// ChannelzSecurityInfo defines the interface that security protocols should implement -// in order to provide security info to channelz. -type ChannelzSecurityInfo interface { - GetSecurityValue() ChannelzSecurityValue -} - -// ChannelzSecurityValue defines the interface that GetSecurityValue() return value -// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue -// and *OtherChannelzSecurityValue. -type ChannelzSecurityValue interface { - isChannelzSecurityValue() -} - -// TLSChannelzSecurityValue defines the struct that TLS protocol should return -// from GetSecurityValue(), containing security info like cipher and certificate used. -type TLSChannelzSecurityValue struct { - StandardName string - LocalCertificate []byte - RemoteCertificate []byte -} - -func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {} - -// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return -// from GetSecurityValue(), which contains protocol specific security info. Note -// the Value field will be sent to users of channelz requesting channel info, and -// thus sensitive info should better be avoided. -type OtherChannelzSecurityValue struct { - Name string - Value proto.Message -} - -func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {} - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -} - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex 
and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go deleted file mode 100644 index 2f4472becc..0000000000 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build !appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains credentials-internal code. -package internal - -import ( - "net" - "syscall" -) - -type sysConn = syscall.Conn - -// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. -// SyscallConn() (the method in interface syscall.Conn) is explicitly -// implemented on this type, -// -// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. -// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns -// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn -// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't -// help here). -type syscallConn struct { - net.Conn - // sysConn is a type alias of syscall.Conn. It's necessary because the name - // `Conn` collides with `net.Conn`. - sysConn -} - -// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that -// implements syscall.Conn. rawConn will be used to support syscall, and newConn -// will be used for read/write. -// -// This function returns newConn if rawConn doesn't implement syscall.Conn. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - sysConn, ok := rawConn.(syscall.Conn) - if !ok { - return newConn - } - return &syscallConn{ - Conn: newConn, - sysConn: sysConn, - } -} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go deleted file mode 100644 index d4346e9eab..0000000000 --- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package internal - -import ( - "net" -) - -// WrapSyscallConn returns newConn on appengine. -func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go deleted file mode 100644 index f286462449..0000000000 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ /dev/null @@ -1,492 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "net" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/stats" -) - -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - cp Compressor - dc Decompressor - bs backoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. - resolverBuilder resolver.Builder - reqHandshake envconfig.RequireHandshakeSetting - channelzParentID int64 - disableServiceConfig bool - disableRetry bool - disableHealthCheck bool - healthCheckFunc internal.HealthChecker -} - -// DialOption configures how we set up the connection. -type DialOption interface { - apply(*dialOptions) -} - -// EmptyDialOption does not alter the dial configuration. It can be embedded in -// another structure to build custom dial options. -// -// This API is EXPERIMENTAL. -type EmptyDialOption struct{} - -func (EmptyDialOption) apply(*dialOptions) {} - -// funcDialOption wraps a function that modifies dialOptions into an -// implementation of the DialOption interface. -type funcDialOption struct { - f func(*dialOptions) -} - -func (fdo *funcDialOption) apply(do *dialOptions) { - fdo.f(do) -} - -func newFuncDialOption(f func(*dialOptions)) *funcDialOption { - return &funcDialOption{ - f: f, - } -} - -// WithWaitForHandshake blocks until the initial settings frame is received from -// the server before assigning RPCs to the connection. -// -// Deprecated: this is the default behavior, and this option will be removed -// after the 1.18 release. -func WithWaitForHandshake() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.reqHandshake = envconfig.RequireHandshakeOn - }) -} - -// WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. 
The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. -// -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. -func WithWriteBufferSize(s int) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.WriteBufferSize = s - }) -} - -// WithReadBufferSize lets you set the size of read buffer, this determines how -// much data can be read at most for each read syscall. -// -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. -func WithReadBufferSize(s int) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.ReadBufferSize = s - }) -} - -// WithInitialWindowSize returns a DialOption which sets the value for initial -// window size on a stream. The lower bound for window size is 64K and any value -// smaller than that will be ignored. -func WithInitialWindowSize(s int32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.InitialWindowSize = s - }) -} - -// WithInitialConnWindowSize returns a DialOption which sets the value for -// initial window size on a connection. The lower bound for window size is 64K -// and any value smaller than that will be ignored. -func WithInitialConnWindowSize(s int32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.InitialConnWindowSize = s - }) -} - -// WithMaxMsgSize returns a DialOption which sets the maximum message size the -// client can receive. -// -// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. -func WithMaxMsgSize(s int) DialOption { - return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) -} - -// WithDefaultCallOptions returns a DialOption which sets the default -// CallOptions for calls over the connection. -func WithDefaultCallOptions(cos ...CallOption) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.callOptions = append(o.callOptions, cos...) - }) -} - -// WithCodec returns a DialOption which sets a codec for message marshaling and -// unmarshaling. -// -// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. -func WithCodec(c Codec) DialOption { - return WithDefaultCallOptions(CallCustomCodec(c)) -} - -// WithCompressor returns a DialOption which sets a Compressor to use for -// message compression. It has lower priority than the compressor set by the -// UseCompressor CallOption. -// -// Deprecated: use UseCompressor instead. -func WithCompressor(cp Compressor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.cp = cp - }) -} - -// WithDecompressor returns a DialOption which sets a Decompressor to use for -// incoming message decompression. If incoming response messages are encoded -// using the decompressor's Type(), it will be used. Otherwise, the message -// encoding will be used to look up the compressor registered via -// encoding.RegisterCompressor, which will then be used to decompress the -// message. If no compressor is registered for the encoding, an Unimplemented -// status error will be returned. -// -// Deprecated: use encoding.RegisterCompressor instead. -func WithDecompressor(dc Decompressor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.dc = dc - }) -} - -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. 
-// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. -func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - }) -} - -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// This is an EXPERIMENTAL API. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver, as -// specified here. -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - -// WithBackoffMaxDelay configures the dialer to use the provided maximum delay -// when backing off after failed connection attempts. -func WithBackoffMaxDelay(md time.Duration) DialOption { - return WithBackoffConfig(BackoffConfig{MaxDelay: md}) -} - -// WithBackoffConfig configures the dialer to use the provided backoff -// parameters after connection failures. -// -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. -func WithBackoffConfig(b BackoffConfig) DialOption { - return withBackoff(backoff.Exponential{ - MaxDelay: b.MaxDelay, - }) -} - -// withBackoff sets the backoff strategy used for connectRetryNum after a failed -// connection attempt. -// -// This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoff.Strategy) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.bs = bs - }) -} - -// WithBlock returns a DialOption which makes caller of Dial blocks until the -// underlying connection is up. Without this, Dial returns immediately and -// connecting the server happens in background. -func WithBlock() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.block = true - }) -} - -// WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. -func WithInsecure() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.insecure = true - }) -} - -// WithTransportCredentials returns a DialOption which configures a connection -// level security credentials (e.g., TLS/SSL). This should not be used together -// with WithCredentialsBundle. 
-func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.TransportCredentials = creds - }) -} - -// WithPerRPCCredentials returns a DialOption which sets credentials and places -// auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) - }) -} - -// WithCredentialsBundle returns a DialOption to set a credentials bundle for -// the ClientConn.WithCreds. This should not be used together with -// WithTransportCredentials. -// -// This API is experimental. -func WithCredentialsBundle(b credentials.Bundle) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.CredsBundle = b - }) -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a -// ClientConn initially. This is valid if and only if WithBlock() is present. -// -// Deprecated: use DialContext and context.WithTimeout instead. -func WithTimeout(d time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.timeout = d - }) -} - -func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.Dialer = f - }) -} - -func init() { - internal.WithContextDialer = withContextDialer - internal.WithResolverBuilder = withResolverBuilder - internal.WithHealthCheckFunc = withHealthCheckFunc -} - -// WithDialer returns a DialOption that specifies a function to use for dialing -// network addresses. If FailOnNonTempDialError() is set to true, and an error -// is returned by f, gRPC checks the error's Temporary() method to decide if it -// should try to reconnect to the network address. -func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return withContextDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - if deadline, ok := ctx.Deadline(); ok { - return f(addr, deadline.Sub(time.Now())) - } - return f(addr, 0) - }) -} - -// WithStatsHandler returns a DialOption that specifies the stats handler for -// all the RPCs and underlying network connections in this ClientConn. -func WithStatsHandler(h stats.Handler) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h - }) -} - -// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on -// non-temporary dial errors. If f is true, and dialer returns a non-temporary -// error, gRPC will fail the connection to the network address and won't try to -// reconnect. The default value of FailOnNonTempDialError is false. -// -// FailOnNonTempDialError only affects the initial dial, and does not do -// anything useful unless you are also using WithBlock(). -// -// This is an EXPERIMENTAL API. -func FailOnNonTempDialError(f bool) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.FailOnNonTempDialError = f - }) -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all -// the RPCs. -func WithUserAgent(s string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s - }) -} - -// WithKeepaliveParams returns a DialOption that specifies keepalive parameters -// for the client transport. 
-func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.KeepaliveParams = kp - }) -} - -// WithUnaryInterceptor returns a DialOption that specifies the interceptor for -// unary RPCs. -func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.unaryInt = f - }) -} - -// WithStreamInterceptor returns a DialOption that specifies the interceptor for -// streaming RPCs. -func WithStreamInterceptor(f StreamClientInterceptor) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.streamInt = f - }) -} - -// WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. -func WithAuthority(a string) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.authority = a - }) -} - -// WithChannelzParentID returns a DialOption that specifies the channelz ID of -// current ClientConn's parent. This function is used in nested channel creation -// (e.g. grpclb dial). -func WithChannelzParentID(id int64) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id - }) -} - -// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any -// service config provided by the resolver and provides a hint to the resolver -// to not fetch service configs. -func WithDisableServiceConfig() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableServiceConfig = true - }) -} - -// WithDisableRetry returns a DialOption that disables retries, even if the -// service config enables them. This does not impact transparent retries, which -// will happen automatically if no data is written to the wire or if the RPC is -// unprocessed by the remote server. -// -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// This API is EXPERIMENTAL. -func WithDisableRetry() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableRetry = true - }) -} - -// WithMaxHeaderListSize returns a DialOption that specifies the maximum -// (uncompressed) size of header list that the client is prepared to accept. -func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) -} - -// WithDisableHealthCheck disables the LB channel health checking for all SubConns of this ClientConn. -// -// This API is EXPERIMENTAL. -func WithDisableHealthCheck() DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.disableHealthCheck = true - }) -} - -// withHealthCheckFunc replaces the default health check function with the provided one. It makes -// tests easier to change the health check function. -// -// For testing purpose only. 
-func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - -func defaultDialOptions() dialOptions { - return dialOptions{ - disableRetry: !envconfig.Retry, - reqHandshake: envconfig.RequireHandshake, - healthCheckFunc: internal.HealthCheckFunc, - copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, - ReadBufferSize: defaultReadBufSize, - }, - } -} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go deleted file mode 100644 index 187adbb117..0000000000 --- a/vendor/google.golang.org/grpc/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* -Package grpc implements an RPC system called gRPC. - -See grpc.io for more information about gRPC. -*/ -package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go deleted file mode 100644 index ade8b7cec7..0000000000 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package encoding defines the interface for the compressor and codec, and -// functions to register and retrieve compressors and codecs. -// -// This package is EXPERIMENTAL. -package encoding - -import ( - "io" - "strings" -) - -// Identity specifies the optional encoding for uncompressed streams. -// It is intended for grpc internal use only. -const Identity = "identity" - -// Compressor is used for compressing and decompressing when sending or -// receiving messages. -type Compressor interface { - // Compress writes the data written to wc to w after compressing it. If an - // error occurs while initializing the compressor, that error is returned - // instead. - Compress(w io.Writer) (io.WriteCloser, error) - // Decompress reads data from r, decompresses it, and provides the - // uncompressed data via the returned io.Reader. If an error occurs while - // initializing the decompressor, that error is returned instead. - Decompress(r io.Reader) (io.Reader, error) - // Name is the name of the compression codec and is used to set the content - // coding header. The result must be static; the result cannot change - // between calls. 
- Name() string -} - -var registeredCompressor = make(map[string]Compressor) - -// RegisterCompressor registers the compressor with gRPC by its name. It can -// be activated when sending an RPC via grpc.UseCompressor(). It will be -// automatically accessed when receiving a message based on the content coding -// header. Servers also use it to send a response with the same encoding as -// the request. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are -// registered with the same name, the one registered last will take effect. -func RegisterCompressor(c Compressor) { - registeredCompressor[c.Name()] = c -} - -// GetCompressor returns Compressor for the given compressor name. -func GetCompressor(name string) Compressor { - return registeredCompressor[name] -} - -// Codec defines the interface gRPC uses to encode and decode messages. Note -// that implementations of this interface must be thread safe; a Codec's -// methods can be called from concurrent goroutines. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // Name returns the name of the Codec implementation. The returned string - // will be used as part of content type in transmission. The result must be - // static; the result cannot change between calls. - Name() string -} - -var registeredCodecs = make(map[string]Codec) - -// RegisterCodec registers the provided Codec for use with all gRPC clients and -// servers. -// -// The Codec will be stored and looked up by result of its Name() method, which -// should match the content-subtype of the encoding handled by the Codec. This -// is case-insensitive, and is stored and looked up as lowercase. If the -// result of calling Name() is an empty string, RegisterCodec will panic. See -// Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are -// registered with the same name, the one registered last will take effect. -func RegisterCodec(codec Codec) { - if codec == nil { - panic("cannot register a nil Codec") - } - contentSubtype := strings.ToLower(codec.Name()) - if contentSubtype == "" { - panic("cannot register Codec with empty string result for String()") - } - registeredCodecs[contentSubtype] = codec -} - -// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is -// registered for the content-subtype. -// -// The content-subtype is expected to be lowercase. -func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] -} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go deleted file mode 100644 index 66b97a6f69..0000000000 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package proto defines the protobuf codec. Importing this package will -// register the codec. -package proto - -import ( - "math" - "sync" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/encoding" -) - -// Name is the name registered for the proto compressor. -const Name = "proto" - -func init() { - encoding.RegisterCodec(codec{}) -} - -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} - -type cachedProtoBuffer struct { - lastMarshaledSize uint32 - proto.Buffer -} - -func capToMaxInt32(val int) uint32 { - if val > math.MaxInt32 { - return uint32(math.MaxInt32) - } - return uint32(val) -} - -func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { - protoMsg := v.(proto.Message) - newSlice := make([]byte, 0, cb.lastMarshaledSize) - - cb.SetBuf(newSlice) - cb.Reset() - if err := cb.Marshal(protoMsg); err != nil { - return nil, err - } - out := cb.Bytes() - cb.lastMarshaledSize = capToMaxInt32(len(out)) - return out, nil -} - -func (codec) Marshal(v interface{}) ([]byte, error) { - if pm, ok := v.(proto.Marshaler); ok { - // object can marshal itself, no need for buffer - return pm.Marshal() - } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - out, err := marshal(v, cb) - - // put back buffer and lose the ref to the slice - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return out, err -} - -func (codec) Unmarshal(data []byte, v interface{}) error { - protoMsg := v.(proto.Message) - protoMsg.Reset() - - if pu, ok := protoMsg.(proto.Unmarshaler); ok { - // object can unmarshal itself, no need for buffer - return pu.Unmarshal(data) - } - - cb := protoBufferPool.Get().(*cachedProtoBuffer) - cb.SetBuf(data) - err := cb.Unmarshal(protoMsg) - cb.SetBuf(nil) - protoBufferPool.Put(cb) - return err -} - -func (codec) Name() string { - return Name -} - -var protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, -} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod deleted file mode 100644 index f296dcf4e6..0000000000 --- a/vendor/google.golang.org/grpc/go.mod +++ /dev/null @@ -1,20 +0,0 @@ -module google.golang.org/grpc - -require ( - cloud.google.com/go v0.26.0 // indirect - github.com/client9/misspell v0.3.4 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.2.0 - github.com/kisielk/gotool v1.0.0 // indirect - golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 - golang.org/x/net v0.0.0-20180826012351-8a410e7b638d - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect - golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 - golang.org/x/text v0.3.0 // indirect - golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 - google.golang.org/appengine v1.1.0 // indirect - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 - honnef.co/go/tools v0.0.0-20180728063816-88497007e858 -) diff --git 
a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum deleted file mode 100644 index bfb6bb7c0e..0000000000 --- a/vendor/google.golang.org/grpc/go.sum +++ /dev/null @@ -1,32 +0,0 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3 h1:x/bBzNauLQAlE3fLku/xy92Y8QwKX5HZymrMz2IiKFc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858 h1:wN+eVZ7U+gqdqkec6C6VXR1OFf9a5Ul9ETzeYsYv20g= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go deleted file mode 100644 index 1fabb11e1b..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ /dev/null @@ 
-1,126 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclog defines logging for grpc. -// -// All logs in transport package only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" - -import "os" - -var logger = newLoggerV2() - -// V reports whether verbosity level l is at least the requested verbose level. -func V(l int) bool { - return logger.V(l) -} - -// Info logs to the INFO log. -func Info(args ...interface{}) { - logger.Info(args...) -} - -// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { - logger.Infoln(args...) -} - -// Warning logs to the WARNING log. -func Warning(args ...interface{}) { - logger.Warning(args...) -} - -// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) -} - -// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { - logger.Warningln(args...) -} - -// Error logs to the ERROR log. -func Error(args ...interface{}) { - logger.Error(args...) -} - -// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) -} - -// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { - logger.Errorln(args...) -} - -// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. -// It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { - logger.Fatal(args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. -// It calles os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) - // Make sure fatal logs will exit. - os.Exit(1) -} - -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -// -// Deprecated: use Info. -func Print(args ...interface{}) { - logger.Info(args...) -} - -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. 
-// -// Deprecated: use Infof. -func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -// -// Deprecated: use Infoln. -func Println(args ...interface{}) { - logger.Infoln(args...) -} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go deleted file mode 100644 index 097494f710..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -// Logger mimics golang's standard Logger as an interface. -// -// Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - -// SetLogger sets the logger that is used in grpc. Call only from -// init() functions. -// -// Deprecated: use SetLoggerV2. -func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...interface{}) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...interface{}) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true -} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go deleted file mode 100644 index d493257769..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ /dev/null @@ -1,195 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpclog - -import ( - "io" - "io/ioutil" - "log" - "os" - "strconv" -) - -// LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} - -// SetLoggerV2 sets logger that is used in grpc to a V2 logger. -// Not mutex-protected, should be called before any gRPC functions. -func SetLoggerV2(l LoggerV2) { - logger = l -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int -} - -// NewLoggerV2 creates a loggerV2 with the provided writers. -// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). -// Error logs will be written to errorW, warningW and infoW. -// Warning logs will be written to warningW and infoW. -// Info logs will be written to infoW. -func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) -} - -// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and -// verbosity level. 
-func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} -} - -// newLoggerV2 creates a loggerV2 to be used as default logger. -// All logs are written to stderr. -func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard - - logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") - switch logLevel { - case "", "ERROR", "error": // If env is unset, set level to ERROR. - errorW = os.Stderr - case "WARNING", "warning": - warningW = os.Stderr - case "INFO", "info": - infoW = os.Stderr - } - - var v int - vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") - if vl, err := strconv.Atoi(vLevel); err == nil { - v = vl - } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) -} - -func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) -} - -func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) -} - -func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) -} - -func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) -} - -func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) -} - -func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) -} - -func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) -} - -func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) -} - -func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) -} - -func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). -} - -func (g *loggerT) V(l int) bool { - return l <= g.v -} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 7c7bcada50..0000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go deleted file mode 100644 index 8b7350022a..0000000000 --- a/vendor/google.golang.org/grpc/interceptor.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" -) - -// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error - -// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC -// and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error - -// Streamer is called by StreamClientInterceptor to create a ClientStream. -type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) - -// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O -// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. -// This is an EXPERIMENTAL API. -type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) - -// UnaryServerInfo consists of various information about a unary RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type UnaryServerInfo struct { - // Server is the service implementation the user provides. This is read-only. - Server interface{} - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string -} - -// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) - -// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info -// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper -// of the service method implementation. It is the responsibility of the interceptor to invoke handler -// to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) - -// StreamServerInfo consists of various information about a streaming RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type StreamServerInfo struct { - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // IsClientStream indicates whether the RPC is a client streaming RPC. 
- IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. -// info contains all the information of this RPC the interceptor can operate on. And handler is the -// service method implementation. It is the responsibility of the interceptor to invoke handler to -// complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go deleted file mode 100644 index 1bd0cce5ab..0000000000 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package backoff implement the backoff strategy for gRPC. -// -// This is kept in internal until the gRPC project decides whether or not to -// allow alternative backoff strategies. -package backoff - -import ( - "time" - - "google.golang.org/grpc/internal/grpcrand" -) - -// Strategy defines the methodology for backing off after a grpc connection -// failure. -// -type Strategy interface { - // Backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. - Backoff(retries int) time.Duration -} - -const ( - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay = 1.0 * time.Second - // factor is applied to the backoff after each retry. - factor = 1.6 - // jitter provides a range to randomize backoff delays. - jitter = 0.2 -) - -// Exponential implements exponential backoff algorithm as defined in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -type Exponential struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration -} - -// Backoff returns the amount of time to wait before the next retry given the -// number of retries. -func (bc Exponential) Backoff(retries int) time.Duration { - if retries == 0 { - return baseDelay - } - backoff, max := float64(baseDelay), float64(bc.MaxDelay) - for backoff < max && retries > 0 { - backoff *= factor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + jitter*(grpcrand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go deleted file mode 100644 index fee6aecd08..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package binarylog implementation binary logging as defined in -// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. -package binarylog - -import ( - "fmt" - "os" - - "google.golang.org/grpc/grpclog" -) - -// Logger is the global binary logger. It can be used to get binary logger for -// each method. -type Logger interface { - getMethodLogger(methodName string) *MethodLogger -} - -// binLogger is the global binary logger for the binary. One of this should be -// built at init time from the configuration (environment varialbe or flags). -// -// It is used to get a methodLogger for each individual method. -var binLogger Logger - -// SetLogger sets the binarg logger. -// -// Only call this at init time. -func SetLogger(l Logger) { - binLogger = l -} - -// GetMethodLogger returns the methodLogger for the given methodName. -// -// methodName should be in the format of "/service/method". -// -// Each methodLogger returned by this method is a new instance. This is to -// generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { - if binLogger == nil { - return nil - } - return binLogger.getMethodLogger(methodName) -} - -func init() { - const envStr = "GRPC_BINARY_LOG_FILTER" - configStr := os.Getenv(envStr) - binLogger = NewLoggerFromConfigString(configStr) -} - -type methodLoggerConfig struct { - // Max length of header and message. - hdr, msg uint64 -} - -type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig - - blacklist map[string]struct{} -} - -// newEmptyLogger creates an empty logger. The map fields need to be filled in -// using the set* functions. -func newEmptyLogger() *logger { - return &logger{} -} - -// Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { - return fmt.Errorf("conflicting global rules found") - } - l.all = ml - return nil -} - -// Set method logger for "service/*". -// -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { - return fmt.Errorf("conflicting rules for service %v found", service) - } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) - } - l.services[service] = ml - return nil -} - -// Set method logger for "service/method". -// -// New methodLogger with same method overrides the old one. 
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) - } - l.methods[method] = ml - return nil -} - -// Set blacklist method for "-service/method". -func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) - } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) - } - l.blacklist[method] = struct{}{} - return nil -} - -// getMethodLogger returns the methodLogger for the given methodName. -// -// methodName should be in the format of "/service/method". -// -// Each methodLogger returned by this method is a new instance. This is to -// generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) - if err != nil { - grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) - return nil - } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) - } - if _, ok := l.blacklist[s+"/"+m]; ok { - return nil - } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) - } - if l.all == nil { - return nil - } - return newMethodLogger(l.all.hdr, l.all.msg) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go deleted file mode 100644 index 1ee00a39ac..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains exported variables/functions that are exported for testing -// only. -// -// An ideal way for this would be to put those in a *_test.go but in binarylog -// package. But this doesn't work with staticcheck with go module. Error was: -// "MdToMetadataProto not declared by package binarylog". This could be caused -// by the way staticcheck looks for files for a certain package, which doesn't -// support *_test.go files. -// -// Move those to binary_test.go when staticcheck is fixed. - -package binarylog - -var ( - // AllLogger is a logger that logs all headers/messages for all RPCs. It's - // for testing only. - AllLogger = NewLoggerFromConfigString("*") - // MdToMetadataProto converts metadata to a binary logging proto message. - // It's for testing only. - MdToMetadataProto = mdToMetadataProto - // AddrToProto converts an address to a binary logging proto message. It's - // for testing only. 
- AddrToProto = addrToProto -) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go deleted file mode 100644 index eb188eae5a..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ /dev/null @@ -1,210 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "google.golang.org/grpc/grpclog" -) - -// NewLoggerFromConfigString reads the string and build a logger. It can be used -// to build a new logger and assign it to binarylog.Logger. -// -// Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. -// -// If two configs exist for one certain method or service, the one specified -// later overrides the privous config. -func NewLoggerFromConfigString(s string) Logger { - if s == "" { - return nil - } - l := newEmptyLogger() - methods := strings.Split(s, ",") - for _, method := range methods { - if err := l.fillMethodLoggerWithConfigString(method); err != nil { - grpclog.Warningf("failed to parse binary log config: %v", err) - return nil - } - } - return l -} - -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds -// it to the right map in the logger. -func (l *logger) fillMethodLoggerWithConfigString(config string) error { - // "" is invalid. - if config == "" { - return errors.New("empty string is not a valid method binary logging config") - } - - // "-service/method", blacklist, no * or {} allowed. 
- if config[0] == '-' { - s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - if m == "*" { - return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config") - } - if suffix != "" { - return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") - } - if err := l.setBlacklist(s + "/" + m); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - return nil - } - - // "*{h:256;m:256}" - if config[0] == '*' { - hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - return nil - } - - s, m, suffix, err := parseMethodConfigAndSuffix(config) - if err != nil { - return fmt.Errorf("invalid config: %q, %v", config, err) - } - hdr, msg, err := parseHeaderMessageLengthConfig(suffix) - if err != nil { - return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) - } - if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { - return fmt.Errorf("invalid config: %v", err) - } - } - return nil -} - -const ( - // TODO: this const is only used by env_config now. But could be useful for - // other config. Move to binarylog.go if necessary. - maxUInt = ^uint64(0) - - // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for - // expected output. - longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` - - // For suffix from above, "{h:123,m:123}". See test for expected output. - optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". - headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` - messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` - headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` -) - -var ( - longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) - headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) - messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) - headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) -) - -// Turn "service/method{h;m}" into "service", "method", "{h;m}". -func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { - // Regexp result: - // - // in: "p.s/m{h:123,m:123}", - // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, - match := longMethodConfigRegexp.FindStringSubmatch(c) - if match == nil { - return "", "", "", fmt.Errorf("%q contains invalid substring", c) - } - service = match[1] - method = match[2] - suffix = match[3] - return -} - -// Turn "{h:123;m:345}" into 123, 345. -// -// Return maxUInt if length is unspecified. -func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { - if c == "" { - return maxUInt, maxUInt, nil - } - // Header config only. 
- if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { - if s := match[1]; s != "" { - hdrLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("failed to convert %q to uint", s) - } - return hdrLenStr, 0, nil - } - return maxUInt, 0, nil - } - - // Message config only. - if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { - if s := match[1]; s != "" { - msgLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("Failed to convert %q to uint", s) - } - return 0, msgLenStr, nil - } - return 0, maxUInt, nil - } - - // Header and message config both. - if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { - // Both hdr and msg are specified, but one or two of them might be empty. - hdrLenStr = maxUInt - msgLenStr = maxUInt - if s := match[1]; s != "" { - hdrLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("Failed to convert %q to uint", s) - } - } - if s := match[2]; s != "" { - msgLenStr, err = strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, 0, fmt.Errorf("Failed to convert %q to uint", s) - } - } - return hdrLenStr, msgLenStr, nil - } - return 0, 0, fmt.Errorf("%q contains invalid substring", c) -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go deleted file mode 100644 index b06cdd4d43..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ /dev/null @@ -1,426 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "net" - "strings" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -type callIDGenerator struct { - id uint64 -} - -func (g *callIDGenerator) next() uint64 { - id := atomic.AddUint64(&g.id, 1) - return id -} - -// reset is for testing only, and doesn't need to be thread safe. -func (g *callIDGenerator) reset() { - g.id = 0 -} - -var idGen callIDGenerator - -// MethodLogger is the sub-logger for each method. -type MethodLogger struct { - headerMaxLen, messageMaxLen uint64 - - callID uint64 - idWithinCallGen *callIDGenerator - - sink Sink // TODO(blog): make this plugable. -} - -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ - headerMaxLen: h, - messageMaxLen: m, - - callID: idGen.next(), - idWithinCallGen: &callIDGenerator{}, - - sink: defaultSink, // TODO(blog): make it plugable. - } -} - -// Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *MethodLogger) Log(c LogEntryConfig) { - m := c.toProto() - timestamp, _ := ptypes.TimestampProto(time.Now()) - m.Timestamp = timestamp - m.CallId = ml.callID - m.SequenceIdWithinCall = ml.idWithinCallGen.next() - - switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: - m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: - m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: - m.PayloadTruncated = ml.truncateMessage(pay.Message) - } - - ml.sink.Write(m) -} - -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { - if ml.headerMaxLen == maxUInt { - return false - } - var ( - bytesLimit = ml.headerMaxLen - index int - ) - // At the end of the loop, index will be the first entry where the total - // size is greater than the limit: - // - // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. - for ; index < len(mdPb.Entry); index++ { - entry := mdPb.Entry[index] - if entry.Key == "grpc-trace-bin" { - // "grpc-trace-bin" is a special key. It's kept in the log entry, - // but not counted towards the size limit. - continue - } - currentEntryLen := uint64(len(entry.Value)) - if currentEntryLen > bytesLimit { - break - } - bytesLimit -= currentEntryLen - } - truncated = index < len(mdPb.Entry) - mdPb.Entry = mdPb.Entry[:index] - return truncated -} - -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { - if ml.messageMaxLen == maxUInt { - return false - } - if ml.messageMaxLen >= uint64(len(msgPb.Data)) { - return false - } - msgPb.Data = msgPb.Data[:ml.messageMaxLen] - return true -} - -// LogEntryConfig represents the configuration for binary log entry. -type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry -} - -// ClientHeader configs the binary log entry to be a ClientHeader entry. -type ClientHeader struct { - OnClientSide bool - Header metadata.MD - MethodName string - Authority string - Timeout time.Duration - // PeerAddr is required only when it's on server side. - PeerAddr net.Addr -} - -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { - // This function doesn't need to set all the fields (e.g. seq ID). The Log - // function will set the fields when necessary. - clientHeader := &pb.ClientHeader{ - Metadata: mdToMetadataProto(c.Header), - MethodName: c.MethodName, - Authority: c.Authority, - } - if c.Timeout > 0 { - clientHeader.Timeout = ptypes.DurationProto(c.Timeout) - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: clientHeader, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// ServerHeader configs the binary log entry to be a ServerHeader entry. -type ServerHeader struct { - OnClientSide bool - Header metadata.MD - // PeerAddr is required only when it's on client side. 
- PeerAddr net.Addr -} - -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ - Metadata: mdToMetadataProto(c.Header), - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// ClientMessage configs the binary log entry to be a ClientMessage entry. -type ClientMessage struct { - OnClientSide bool - // Message can be a proto.Message or []byte. Other messages formats are not - // supported. - Message interface{} -} - -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { - var ( - data []byte - err error - ) - if m, ok := c.Message.(proto.Message); ok { - data, err = proto.Marshal(m) - if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) - } - } else if b, ok := c.Message.([]byte); ok { - data = b - } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ - Length: uint32(len(data)), - Data: data, - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ServerMessage configs the binary log entry to be a ServerMessage entry. -type ServerMessage struct { - OnClientSide bool - // Message can be a proto.Message or []byte. Other messages formats are not - // supported. - Message interface{} -} - -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { - var ( - data []byte - err error - ) - if m, ok := c.Message.(proto.Message); ok { - data, err = proto.Marshal(m) - if err != nil { - grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) - } - } else if b, ok := c.Message.([]byte); ok { - data = b - } else { - grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ - Length: uint32(len(data)), - Data: data, - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. -type ClientHalfClose struct { - OnClientSide bool -} - -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, - Payload: nil, // No payload here. - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// ServerTrailer configs the binary log entry to be a ServerTrailer entry. -type ServerTrailer struct { - OnClientSide bool - Trailer metadata.MD - // Err is the status error. - Err error - // PeerAddr is required only when it's on client side and the RPC is trailer - // only. 
- PeerAddr net.Addr -} - -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { - st, ok := status.FromError(c.Err) - if !ok { - grpclog.Info("binarylogging: error in trailer is not a status error") - } - var ( - detailsBytes []byte - err error - ) - stProto := st.Proto() - if stProto != nil && len(stProto.Details) != 0 { - detailsBytes, err = proto.Marshal(stProto) - if err != nil { - grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) - } - } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ - Metadata: mdToMetadataProto(c.Trailer), - StatusCode: uint32(st.Code()), - StatusMessage: st.Message(), - StatusDetails: detailsBytes, - }, - }, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - if c.PeerAddr != nil { - ret.Peer = addrToProto(c.PeerAddr) - } - return ret -} - -// Cancel configs the binary log entry to be a Cancel entry. -type Cancel struct { - OnClientSide bool -} - -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Payload: nil, - } - if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT - } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER - } - return ret -} - -// metadataKeyOmit returns whether the metadata entry with this key should be -// omitted. -func metadataKeyOmit(key string) bool { - switch key { - case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": - return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. - return false - } - if strings.HasPrefix(key, "grpc-") { - return true - } - return false -} - -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} - for k, vv := range md { - if metadataKeyOmit(k) { - continue - } - for _, v := range vv { - ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ - Key: k, - Value: []byte(v), - }, - ) - } - } - return ret -} - -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} - switch a := addr.(type) { - case *net.TCPAddr: - if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 - } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 - } else { - ret.Type = pb.Address_TYPE_UNKNOWN - // Do not set address and port fields. - break - } - ret.Address = a.IP.String() - ret.IpPort = uint32(a.Port) - case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX - ret.Address = a.String() - default: - ret.Type = pb.Address_TYPE_UNKNOWN - } - return ret -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh deleted file mode 100644 index 113d40cbe1..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ - diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go deleted file mode 100644 index 20d044f0fd..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "sync" - "time" - - "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" - "google.golang.org/grpc/grpclog" -) - -var ( - defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). -) - -// SetDefaultSink sets the sink where binary logs will be written to. -// -// Not thread safe. Only set during initialization. -func SetDefaultSink(s Sink) { - if defaultSink != nil { - defaultSink.Close() - } - defaultSink = s -} - -// Sink writes log entry into the binary log sink. -type Sink interface { - // Write will be called to write the log entry into the sink. - // - // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error - // Close will be called when the Sink is replaced by a new Sink. - Close() error -} - -type noopSink struct{} - -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } - -// newWriterSink creates a binary log sink with the given writer. -// -// Write() marshalls the proto message and writes it to the given writer. Each -// message is prefixed with a 4 byte big endian unsigned integer as the length. -// -// No buffer is done, Close() doesn't try to close the writer. -func newWriterSink(w io.Writer) *writerSink { - return &writerSink{out: w} -} - -type writerSink struct { - out io.Writer -} - -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { - b, err := proto.Marshal(e) - if err != nil { - grpclog.Infof("binary logging: failed to marshal proto message: %v", err) - } - hdr := make([]byte, 4) - binary.BigEndian.PutUint32(hdr, uint32(len(b))) - if _, err := ws.out.Write(hdr); err != nil { - return err - } - if _, err := ws.out.Write(b); err != nil { - return err - } - return nil -} - -func (ws *writerSink) Close() error { return nil } - -type bufWriteCloserSink struct { - mu sync.Mutex - closer io.Closer - out *writerSink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. 
- - writeStartOnce sync.Once - writeTicker *time.Ticker -} - -func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) - fs.mu.Lock() - if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() - return err - } - fs.mu.Unlock() - return nil -} - -const ( - bufFlushDuration = 60 * time.Second -) - -func (fs *bufWriteCloserSink) startFlushGoroutine() { - fs.writeTicker = time.NewTicker(bufFlushDuration) - go func() { - for range fs.writeTicker.C { - fs.mu.Lock() - fs.buf.Flush() - fs.mu.Unlock() - } - }() -} - -func (fs *bufWriteCloserSink) Close() error { - if fs.writeTicker != nil { - fs.writeTicker.Stop() - } - fs.mu.Lock() - fs.buf.Flush() - fs.closer.Close() - fs.out.Close() - fs.mu.Unlock() - return nil -} - -func newBufWriteCloserSink(o io.WriteCloser) Sink { - bufW := bufio.NewWriter(o) - return &bufWriteCloserSink{ - closer: o, - out: newWriterSink(bufW), - buf: bufW, - } -} - -// NewTempFileSink creates a temp file and returns a Sink that writes to this -// file. -func NewTempFileSink() (Sink, error) { - tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") - if err != nil { - return nil, fmt.Errorf("failed to create temp file: %v", err) - } - return newBufWriteCloserSink(tempFile), nil -} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go deleted file mode 100644 index 15dc7803d8..0000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package binarylog - -import ( - "errors" - "strings" -) - -// parseMethodName splits service and method from the input. It expects format -// "/service/method". -// -// TODO: move to internal/grpcutil. -func parseMethodName(methodName string) (service, method string, _ error) { - if !strings.HasPrefix(methodName, "/") { - return "", "", errors.New("invalid method name: should start with /") - } - methodName = methodName[1:] - - pos := strings.LastIndex(methodName, "/") - if pos < 0 { - return "", "", errors.New("invalid method name: suffix /method is missing") - } - return methodName[:pos], methodName[pos+1:], nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go deleted file mode 100644 index 041520d351..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ /dev/null @@ -1,699 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package channelz defines APIs for enabling channelz service, entry -// registration/deletion, and accessing channelz data. It also defines channelz -// metric struct formats. -// -// All APIs in this package are experimental. -package channelz - -import ( - "sort" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultMaxTraceEntry int32 = 30 -) - -var ( - db dbWrapper - idGen idGenerator - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry -) - -// TurnOn turns on channelz data collection. -func TurnOn() { - if !IsOn() { - NewChannelzStorage() - atomic.StoreInt32(&curState, 1) - } -} - -// IsOn returns whether channelz data collection is on. -func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) -} - -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - -// NewChannelzStorage initializes channelz data storage and id generator. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) - idGen.reset() -} - -// GetTopChannels returns a slice of top channel's ChannelMetric, along with a -// boolean indicating whether there's more top channels to be queried for. -// -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) -} - -// GetServers returns a slice of server's ServerMetric, along with a -// boolean indicating whether there's more servers to be queried for. -// -// The arg id specifies that only server with id at or above it will be included -// in the result. 
The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) -} - -// GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to -// be queried for. -// -// The arg startID specifies that only sockets with id at or above it will be -// included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) -} - -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) -} - -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) -} - -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) -} - -// GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) -} - -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { - id := idGen.genID() - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), - nestedChans: make(map[int64]string), - id: id, - pid: pid, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, - } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id -} - -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") - return 0 - } - id := idGen.genID() - sc := &subChannel{ - refName: ref, - c: c, - sockets: make(map[int64]string), - id: id, - pid: pid, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, - } - db.get().addSubChannel(id, sc, pid, ref) - return id -} - -// RegisterServer registers the given server s in channelz database. It returns -// the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { - id := idGen.genID() - svr := &server{ - refName: ref, - s: s, - sockets: make(map[int64]string), - listenSockets: make(map[int64]string), - id: id, - } - db.get().addServer(id, svr) - return id -} - -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). 
It returns the unique channelz tracking id assigned to -// this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") - return 0 - } - id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id -} - -// RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") - return 0 - } - id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id -} - -// RemoveEntry removes an entry with unique channelz trakcing id to be id from -// channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) -} - -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { - if getMaxTraceEntry() == 0 { - return - } - db.get().traceEvent(id, desc) -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. -type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. 
-// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. -func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] 
{ - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - var s []*SocketMetric - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. 
- c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm -} - -type idGenerator struct { - id int64 -} - -func (i *idGenerator) reset() { - atomic.StoreInt64(&i.id, 0) -} - -func (i *idGenerator) genID() int64 { - return atomic.AddInt64(&i.id, 1) -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 17c2274cb3..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,702 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -// entry represents a node in the channelz database. -type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent. - getParentID() int64 -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. 
- // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. 
- NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. -type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
- traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. 
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. 
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUNKNOWN indicates unknown severity of a trace event. - CtUNKNOWN Severity = iota - // CtINFO indicates info level severity of a trace event. - CtINFO - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel -) - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go deleted file mode 100644 index 692dd61817..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// SocketOptionData defines the struct to hold socket option data, and related -// getter function to obtain info from fd. -type SocketOptionData struct { - Linger *unix.Linger - RecvTimeout *unix.Timeval - SendTimeout *unix.Timeval - TCPInfo *unix.TCPInfo -} - -// Getsockopt defines the function to get socket options requested by channelz. -// It is to be passed to syscall.RawConn.Control(). -func (s *SocketOptionData) Getsockopt(fd uintptr) { - if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { - s.Linger = v - } - if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { - s.RecvTimeout = v - } - if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { - s.SendTimeout = v - } - if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { - s.TCPInfo = v - } -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go deleted file mode 100644 index 79edbefc43..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build !linux appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "sync" - - "google.golang.org/grpc/grpclog" -) - -var once sync.Once - -// SocketOptionData defines the struct to hold socket option data, and related -// getter function to obtain info from fd. -// Windows OS doesn't support Socket Option -type SocketOptionData struct { -} - -// Getsockopt defines the function to get socket options requested by channelz. -// It is to be passed to syscall.RawConn.Control(). -// Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { - once.Do(func() { - grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") - }) -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go deleted file mode 100644 index fdf409d55d..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build linux,!appengine - -/* - * - * Copyright 2018 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" -) - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go deleted file mode 100644 index 8864a08111..0000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !linux appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go deleted file mode 100644 index d2193b3a21..0000000000 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package envconfig contains grpc settings configured by environment variables. -package envconfig - -import ( - "os" - "strings" -) - -const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" - requireHandshakeStr = prefix + "REQUIRE_HANDSHAKE" -) - -// RequireHandshakeSetting describes the settings for handshaking. -type RequireHandshakeSetting int - -const ( - // RequireHandshakeHybrid (default, deprecated) indicates to not wait for - // handshake before considering a connection ready, but wait before - // considering successful. 
- RequireHandshakeHybrid RequireHandshakeSetting = iota - // RequireHandshakeOn (default after the 1.17 release) indicates to wait - // for handshake before considering a connection ready/successful. - RequireHandshakeOn - // RequireHandshakeOff indicates to not wait for handshake before - // considering a connection ready/successful. - RequireHandshakeOff -) - -var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") - // RequireHandshake is set based upon the GRPC_GO_REQUIRE_HANDSHAKE - // environment variable. - // - // Will be removed after the 1.18 release. - RequireHandshake RequireHandshakeSetting -) - -func init() { - switch strings.ToLower(os.Getenv(requireHandshakeStr)) { - case "on": - default: - RequireHandshake = RequireHandshakeOn - case "off": - RequireHandshake = RequireHandshakeOff - case "hybrid": - // Will be removed after the 1.17 release. - RequireHandshake = RequireHandshakeHybrid - } -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 200b115ca2..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - res := r.Float64() - mu.Unlock() - return res -} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go deleted file mode 100644 index fbe697c376..0000000000 --- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package grpcsync implements additional synchronization primitives built upon -// the sync package. -package grpcsync - -import ( - "sync" - "sync/atomic" -) - -// Event represents a one-time event that may occur in the future. -type Event struct { - fired int32 - c chan struct{} - o sync.Once -} - -// Fire causes e to complete. It is safe to call multiple times, and -// concurrently. It returns true iff this call to Fire caused the signaling -// channel returned by Done to close. -func (e *Event) Fire() bool { - ret := false - e.o.Do(func() { - atomic.StoreInt32(&e.fired, 1) - close(e.c) - ret = true - }) - return ret -} - -// Done returns a channel that will be closed when Fire is called. -func (e *Event) Done() <-chan struct{} { - return e.c -} - -// HasFired returns true if Fire has been called. -func (e *Event) HasFired() bool { - return atomic.LoadInt32(&e.fired) == 1 -} - -// NewEvent returns a new, ready-to-use Event. -func NewEvent() *Event { - return &Event{c: make(chan struct{})} -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go deleted file mode 100644 index eaa54d4fc6..0000000000 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains gRPC-internal code, to avoid polluting -// the godoc of the top-level grpc package. It must not import any grpc -// symbols to avoid circular dependencies. -package internal - -import "context" - -var ( - // WithContextDialer is exported by dialoptions.go - WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption - // WithResolverBuilder is exported by dialoptions.go - WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption - // WithHealthCheckFunc is not exported by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption - // HealthCheckFunc is used to provide client-side LB channel health checking - HealthCheckFunc HealthChecker - // BalancerUnregister is exported by package balancer to unregister a balancer. - BalancerUnregister func(name string) -) - -// HealthChecker defines the signature of the client-side LB channel health checking function. -type HealthChecker func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error - -const ( - // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. - CredsBundleModeFallback = "fallback" - // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer - // mode. - CredsBundleModeBalancer = "balancer" - // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode - // that supports backend returned by grpclb balancer. 
- CredsBundleModeBackendFromBalancer = "backend-from-balancer" -) diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go deleted file mode 100644 index 43281a3e07..0000000000 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build !appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package syscall provides functionalities that grpc uses to get low-level operating system -// stats/info. -package syscall - -import ( - "fmt" - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" - "google.golang.org/grpc/grpclog" -) - -// GetCPUTime returns the how much CPU time has passed since the start of this process. -func GetCPUTime() int64 { - var ts unix.Timespec - if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { - grpclog.Fatal(err) - } - return ts.Nano() -} - -// Rusage is an alias for syscall.Rusage under linux non-appengine environment. -type Rusage syscall.Rusage - -// GetRusage returns the resource usage of current process. -func GetRusage() (rusage *Rusage) { - rusage = new(Rusage) - syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) - return -} - -// CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - f := (*syscall.Rusage)(first) - l := (*syscall.Rusage)(latest) - var ( - utimeDiffs = l.Utime.Sec - f.Utime.Sec - utimeDiffus = l.Utime.Usec - f.Utime.Usec - stimeDiffs = l.Stime.Sec - f.Stime.Sec - stimeDiffus = l.Stime.Usec - f.Stime.Usec - ) - - uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 - sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 - - return uTimeElapsed, sTimeElapsed -} - -// SetTCPUserTimeout sets the TCP user timeout on a connection's socket -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - tcpconn, ok := conn.(*net.TCPConn) - if !ok { - // not a TCP connection. exit early - return nil - } - rawConn, err := tcpconn.SyscallConn() - if err != nil { - return fmt.Errorf("error getting raw connection: %v", err) - } - err = rawConn.Control(func(fd uintptr) { - err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) - }) - if err != nil { - return fmt.Errorf("error setting option on socket: %v", err) - } - - return nil -} - -// GetTCPUserTimeout gets the TCP user timeout on a connection's socket -func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { - tcpconn, ok := conn.(*net.TCPConn) - if !ok { - err = fmt.Errorf("conn is not *net.TCPConn. 
got %T", conn) - return - } - rawConn, err := tcpconn.SyscallConn() - if err != nil { - err = fmt.Errorf("error getting raw connection: %v", err) - return - } - err = rawConn.Control(func(fd uintptr) { - opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) - }) - if err != nil { - err = fmt.Errorf("error getting option on socket: %v", err) - return - } - - return -} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go deleted file mode 100644 index 61678feb00..0000000000 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build !linux appengine - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package syscall - -import ( - "net" - "time" - - "google.golang.org/grpc/grpclog" -) - -func init() { - grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") -} - -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. -func GetCPUTime() int64 { - return 0 -} - -// Rusage is an empty struct under non-linux or appengine environment. -type Rusage struct{} - -// GetRusage is a no-op function under non-linux or appengine environment. -func GetRusage() (rusage *Rusage) { - return nil -} - -// CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { - return 0, 0 -} - -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { - return nil -} - -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { - return -1, nil -} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go deleted file mode 100644 index 070680edba..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package transport - -import ( - "sync" - "time" -) - -const ( - // bdpLimit is the maximum value the flow control windows will be increased - // to. TCP typically limits this to 4MB, but some systems go up to 16MB. - // Since this is only a limit, it is safe to make it optimistic. - bdpLimit = (1 << 20) * 16 - // alpha is a constant factor used to keep a moving average - // of RTTs. - alpha = 0.9 - // If the current bdp sample is greater than or equal to - // our beta * our estimated bdp and the current bandwidth - // sample is the maximum bandwidth observed so far, we - // increase our bbp estimate by a factor of gamma. - beta = 0.66 - // To put our bdp to be smaller than or equal to twice the real BDP, - // we should multiply our current sample with 4/3, however to round things out - // we use 2 as the multiplication factor. - gamma = 2 -) - -// Adding arbitrary data to ping so that its ack can be identified. -// Easter-egg: what does the ping message say? -var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} - -type bdpEstimator struct { - // sentAt is the time when the ping was sent. - sentAt time.Time - - mu sync.Mutex - // bdp is the current bdp estimate. - bdp uint32 - // sample is the number of bytes received in one measurement cycle. - sample uint32 - // bwMax is the maximum bandwidth noted so far (bytes/sec). - bwMax float64 - // bool to keep track of the beginning of a new measurement cycle. - isSent bool - // Callback to update the window sizes. - updateFlowControl func(n uint32) - // sampleCount is the number of samples taken so far. - sampleCount uint64 - // round trip time (seconds) - rtt float64 -} - -// timesnap registers the time bdp ping was sent out so that -// network rtt can be calculated when its ack is received. -// It is called (by controller) when the bdpPing is -// being written on the wire. -func (b *bdpEstimator) timesnap(d [8]byte) { - if bdpPing.data != d { - return - } - b.sentAt = time.Now() -} - -// add adds bytes to the current sample for calculating bdp. -// It returns true only if a ping must be sent. This can be used -// by the caller (handleData) to make decision about batching -// a window update with it. -func (b *bdpEstimator) add(n uint32) bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.bdp == bdpLimit { - return false - } - if !b.isSent { - b.isSent = true - b.sample = n - b.sentAt = time.Time{} - b.sampleCount++ - return true - } - b.sample += n - return false -} - -// calculate is called when an ack for a bdp ping is received. -// Here we calculate the current bdp and bandwidth sample and -// decide if the flow control windows should go up. -func (b *bdpEstimator) calculate(d [8]byte) { - // Check if the ping acked for was the bdp ping. - if bdpPing.data != d { - return - } - b.mu.Lock() - rttSample := time.Since(b.sentAt).Seconds() - if b.sampleCount < 10 { - // Bootstrap rtt with an average of first 10 rtt samples. - b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) - } else { - // Heed to the recent past more. - b.rtt += (rttSample - b.rtt) * float64(alpha) - } - b.isSent = false - // The number of bytes accumulated so far in the sample is smaller - // than or equal to 1.5 times the real BDP on a saturated connection. 
- bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) - if bwCurrent > b.bwMax { - b.bwMax = bwCurrent - } - // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is - // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we - // should update our perception of the network BDP. - if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { - sampleFloat := float64(b.sample) - b.bdp = uint32(gamma * sampleFloat) - if b.bdp > bdpLimit { - b.bdp = bdpLimit - } - bdp := b.bdp - b.mu.Unlock() - b.updateFlowControl(bdp) - return - } - b.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go deleted file mode 100644 index 204ba1588b..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ /dev/null @@ -1,852 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "bytes" - "fmt" - "runtime" - "sync" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { - e.SetMaxDynamicTableSizeLimit(v) -} - -type itemNode struct { - it interface{} - next *itemNode -} - -type itemList struct { - head *itemNode - tail *itemNode -} - -func (il *itemList) enqueue(i interface{}) { - n := &itemNode{it: i} - if il.tail == nil { - il.head, il.tail = n, n - return - } - il.tail.next = n - il.tail = n -} - -// peek returns the first item in the list without removing it from the -// list. -func (il *itemList) peek() interface{} { - return il.head.it -} - -func (il *itemList) dequeue() interface{} { - if il.head == nil { - return nil - } - i := il.head.it - il.head = il.head.next - if il.head == nil { - il.tail = nil - } - return i -} - -func (il *itemList) dequeueAll() *itemNode { - h := il.head - il.head, il.tail = nil, nil - return h -} - -func (il *itemList) isEmpty() bool { - return il.head == nil -} - -// The following defines various control items which could flow through -// the control buffer of transport. They represent different aspects of -// control tasks, e.g., flow control, settings, streaming resetting, etc. - -// registerStream is used to register an incoming stream with loopy writer. -type registerStream struct { - streamID uint32 - wq *writeQuota -} - -// headerFrame is also used to register stream on the client-side. -type headerFrame struct { - streamID uint32 - hf []hpack.HeaderField - endStream bool // Valid on server side. - initStream func(uint32) (bool, error) // Used only on the client side. - onWrite func() - wq *writeQuota // write quota for the stream created. - cleanup *cleanupStream // Valid on the server side. 
- onOrphaned func(error) // Valid on client-side -} - -type cleanupStream struct { - streamID uint32 - rst bool - rstCode http2.ErrCode - onWrite func() -} - -type dataFrame struct { - streamID uint32 - endStream bool - h []byte - d []byte - // onEachWrite is called every time - // a part of d is written out. - onEachWrite func() -} - -type incomingWindowUpdate struct { - streamID uint32 - increment uint32 -} - -type outgoingWindowUpdate struct { - streamID uint32 - increment uint32 -} - -type incomingSettings struct { - ss []http2.Setting -} - -type outgoingSettings struct { - ss []http2.Setting -} - -type incomingGoAway struct { -} - -type goAway struct { - code http2.ErrCode - debugData []byte - headsUp bool - closeConn bool -} - -type ping struct { - ack bool - data [8]byte -} - -type outFlowControlSizeRequest struct { - resp chan uint32 -} - -type outStreamState int - -const ( - active outStreamState = iota - empty - waitingOnStreamQuota -) - -type outStream struct { - id uint32 - state outStreamState - itl *itemList - bytesOutStanding int - wq *writeQuota - - next *outStream - prev *outStream -} - -func (s *outStream) deleteSelf() { - if s.prev != nil { - s.prev.next = s.next - } - if s.next != nil { - s.next.prev = s.prev - } - s.next, s.prev = nil, nil -} - -type outStreamList struct { - // Following are sentinel objects that mark the - // beginning and end of the list. They do not - // contain any item lists. All valid objects are - // inserted in between them. - // This is needed so that an outStream object can - // deleteSelf() in O(1) time without knowing which - // list it belongs to. - head *outStream - tail *outStream -} - -func newOutStreamList() *outStreamList { - head, tail := new(outStream), new(outStream) - head.next = tail - tail.prev = head - return &outStreamList{ - head: head, - tail: tail, - } -} - -func (l *outStreamList) enqueue(s *outStream) { - e := l.tail.prev - e.next = s - s.prev = e - s.next = l.tail - l.tail.prev = s -} - -// remove from the beginning of the list. -func (l *outStreamList) dequeue() *outStream { - b := l.head.next - if b == l.tail { - return nil - } - b.deleteSelf() - return b -} - -// controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. 
-type controlBuffer struct { - ch chan struct{} - done <-chan struct{} - mu sync.Mutex - consumerWaiting bool - list *itemList - err error -} - -func newControlBuffer(done <-chan struct{}) *controlBuffer { - return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, - } -} - -func (c *controlBuffer) put(it interface{}) error { - _, err := c.executeAndPut(nil, it) - return err -} - -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) { - var wakeUp bool - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if f != nil { - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - } - if c.consumerWaiting { - wakeUp = true - c.consumerWaiting = false - } - c.list.enqueue(it) - c.mu.Unlock() - if wakeUp { - select { - case c.ch <- struct{}{}: - default: - } - } - return true, nil -} - -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - -func (c *controlBuffer) get(block bool) (interface{}, error) { - for { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue() - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil - } - c.consumerWaiting = true - c.mu.Unlock() - select { - case <-c.ch: - case <-c.done: - c.finish() - return nil, ErrConnClosing - } - } -} - -func (c *controlBuffer) finish() { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return - } - c.err = ErrConnClosing - // There may be headers for streams in the control buffer. - // These streams need to be cleaned out since the transport - // is still not aware of these yet. - for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) - } - } - c.mu.Unlock() -} - -type side int - -const ( - clientSide side = iota - serverSide -) - -// Loopy receives frames from the control buffer. -// Each frame is handled individually; most of the work done by loopy goes -// into handling data frames. Loopy maintains a queue of active streams, and each -// stream maintains a queue of data frames; as loopy receives data frames -// it gets added to the queue of the relevant stream. -// Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While -// processing a stream, loopy writes out data bytes from this stream capped by the min -// of http2MaxFrameLen, connection-level flow control and stream-level flow control. -type loopyWriter struct { - side side - cbuf *controlBuffer - sendQuota uint32 - oiws uint32 // outbound initial window size. - // estdStreams is map of all established streams that are not cleaned-up yet. - // On client-side, this is all streams whose headers were sent out. - // On server-side, this is all streams whose headers were received. - estdStreams map[uint32]*outStream // Established streams. - // activeStreams is a linked-list of all streams that have data to send and some - // stream-level flow control quota. 
- // Each of these streams internally have a list of data items(and perhaps trailers - // on the server-side) to be sent out. - activeStreams *outStreamList - framer *framer - hBuf *bytes.Buffer // The buffer for HPACK encoding. - hEnc *hpack.Encoder // HPACK encoder. - bdpEst *bdpEstimator - draining bool - - // Side-specific handlers - ssGoAwayHandler func(*goAway) (bool, error) -} - -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { - var buf bytes.Buffer - l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - } - return l -} - -const minBatchSize = 1000 - -// run should be run in a separate goroutine. -// It reads control frames from controlBuf and processes them by: -// 1. Updating loopy's internal state, or/and -// 2. Writing out HTTP2 frames on the wire. -// -// Loopy keeps all active streams with data to send in a linked-list. -// All streams in the activeStreams linked-list must have both: -// 1. Data to send, and -// 2. Stream level flow control quota available. -// -// In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. -func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - infof("transport: loopyWriter.run returning. %v", err) - err = nil - } - }() - for { - it, err := l.cbuf.get(true) - if err != nil { - return err - } - if err = l.handle(it); err != nil { - return err - } - if _, err = l.processData(); err != nil { - return err - } - gosched := true - hasdata: - for { - it, err := l.cbuf.get(false) - if err != nil { - return err - } - if it != nil { - if err = l.handle(it); err != nil { - return err - } - if _, err = l.processData(); err != nil { - return err - } - continue hasdata - } - isEmpty, err := l.processData() - if err != nil { - return err - } - if !isEmpty { - continue hasdata - } - if gosched { - gosched = false - if l.framer.writer.offset < minBatchSize { - runtime.Gosched() - continue hasdata - } - } - l.framer.writer.Flush() - break hasdata - - } - } -} - -func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { - return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) -} - -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { - // Otherwise update the quota. - if w.streamID == 0 { - l.sendQuota += w.increment - return nil - } - // Find the stream and update it. 
- if str, ok := l.estdStreams[w.streamID]; ok { - str.bytesOutStanding -= int(w.increment) - if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { - str.state = active - l.activeStreams.enqueue(str) - return nil - } - } - return nil -} - -func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { - return l.framer.fr.WriteSettings(s.ss...) -} - -func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } - return l.framer.fr.WriteSettingsAck() -} - -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { - str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - } - l.estdStreams[h.streamID] = str - return nil -} - -func (l *loopyWriter) headerHandler(h *headerFrame) error { - if l.side == serverSide { - str, ok := l.estdStreams[h.streamID] - if !ok { - warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) - return nil - } - // Case 1.A: Server is responding back with headers. - if !h.endStream { - return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) - } - // else: Case 1.B: Server wants to close stream. - - if str.state != empty { // either active or waiting on stream quota. - // add it str's list of items. - str.itl.enqueue(h) - return nil - } - if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { - return err - } - return l.cleanupStreamHandler(h.cleanup) - } - // Case 2: Client wants to originate stream. - str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - } - str.itl.enqueue(h) - return l.originateStream(str) -} - -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - sendPing, err := hdr.initStream(str.id) - if err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. - return nil - } - if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { - return err - } - l.estdStreams[str.id] = str - if sendPing { - return l.pingHandler(&ping{data: [8]byte{}}) - } - return nil -} - -func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { - if onWrite != nil { - onWrite() - } - l.hBuf.Reset() - for _, f := range hf { - if err := l.hEnc.WriteField(f); err != nil { - warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) - } - } - var ( - err error - endHeaders, first bool - ) - first = true - for !endHeaders { - size := l.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - first = false - err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ - StreamID: streamID, - BlockFragment: l.hBuf.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - }) - } else { - err = l.framer.fr.WriteContinuation( - streamID, - endHeaders, - l.hBuf.Next(size), - ) - } - if err != nil { - return err - } - } - return nil -} - -func (l *loopyWriter) preprocessData(df *dataFrame) error { - str, ok := l.estdStreams[df.streamID] - if !ok { - return nil - } - // If we got data for a stream it means that - // stream was originated and the headers were sent out. 
- str.itl.enqueue(df) - if str.state == empty { - str.state = active - l.activeStreams.enqueue(str) - } - return nil -} - -func (l *loopyWriter) pingHandler(p *ping) error { - if !p.ack { - l.bdpEst.timesnap(p.data) - } - return l.framer.fr.WritePing(p.ack, p.data) - -} - -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { - o.resp <- l.sendQuota - return nil -} - -func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { - c.onWrite() - if str, ok := l.estdStreams[c.streamID]; ok { - // On the server side it could be a trailers-only response or - // a RST_STREAM before stream initialization thus the stream might - // not be established yet. - delete(l.estdStreams, c.streamID) - str.deleteSelf() - } - if c.rst { // If RST_STREAM needs to be sent. - if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { - return err - } - } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing - } - return nil -} - -func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { - if l.side == clientSide { - l.draining = true - if len(l.estdStreams) == 0 { - return ErrConnClosing - } - } - return nil -} - -func (l *loopyWriter) goAwayHandler(g *goAway) error { - // Handling of outgoing GoAway is very specific to side. - if l.ssGoAwayHandler != nil { - draining, err := l.ssGoAwayHandler(g) - if err != nil { - return err - } - l.draining = draining - } - return nil -} - -func (l *loopyWriter) handle(i interface{}) error { - switch i := i.(type) { - case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) - case *outgoingWindowUpdate: - return l.outgoingWindowUpdateHandler(i) - case *incomingSettings: - return l.incomingSettingsHandler(i) - case *outgoingSettings: - return l.outgoingSettingsHandler(i) - case *headerFrame: - return l.headerHandler(i) - case *registerStream: - return l.registerStreamHandler(i) - case *cleanupStream: - return l.cleanupStreamHandler(i) - case *incomingGoAway: - return l.incomingGoAwayHandler(i) - case *dataFrame: - return l.preprocessData(i) - case *ping: - return l.pingHandler(i) - case *goAway: - return l.goAwayHandler(i) - case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) - default: - return fmt.Errorf("transport: unknown control message type %T", i) - } -} - -func (l *loopyWriter) applySettings(ss []http2.Setting) error { - for _, s := range ss { - switch s.ID { - case http2.SettingInitialWindowSize: - o := l.oiws - l.oiws = s.Val - if o < l.oiws { - // If the new limit is greater make all depleted streams active. - for _, stream := range l.estdStreams { - if stream.state == waitingOnStreamQuota { - stream.state = active - l.activeStreams.enqueue(stream) - } - } - } - case http2.SettingHeaderTableSize: - updateHeaderTblSize(l.hEnc, s.Val) - } - } - return nil -} - -// processData removes the first stream from active streams, writes out at most 16KB -// of its data and then puts it at the end of activeStreams if there's still more data -// to be sent and stream has some stream-level flow control. -func (l *loopyWriter) processData() (bool, error) { - if l.sendQuota == 0 { - return true, nil - } - str := l.activeStreams.dequeue() // Remove the first stream. - if str == nil { - return true, nil - } - dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. - // A data item is represented by a dataFrame, since it later translates into - // multiple HTTP2 data frames. 
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. - - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame - // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { - return false, err - } - str.itl.dequeue() // remove the empty data item from stream - if str.itl.isEmpty() { - str.state = empty - } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. - if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { - return false, err - } - if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil - } - } else { - l.activeStreams.enqueue(str) - } - return false, nil - } - var ( - idx int - buf []byte - ) - if len(dataItem.h) != 0 { // data header has not been written out yet. - buf = dataItem.h - } else { - idx = 1 - buf = dataItem.d - } - size := http2MaxFrameLen - if len(buf) < size { - size = len(buf) - } - if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. - str.state = waitingOnStreamQuota - return false, nil - } else if strQuota < size { - size = strQuota - } - - if l.sendQuota < uint32(size) { // connection-level flow control. - size = int(l.sendQuota) - } - // Now that outgoing flow controls are checked we can replenish str's write quota - str.wq.replenish(size) - var endStream bool - // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && size == len(buf) { - // buf contains either data or it contains header but data is empty. - if idx == 1 || len(dataItem.d) == 0 { - endStream = true - } - } - if dataItem.onEachWrite != nil { - dataItem.onEachWrite() - } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { - return false, err - } - buf = buf[size:] - str.bytesOutStanding += size - l.sendQuota -= uint32(size) - if idx == 0 { - dataItem.h = buf - } else { - dataItem.d = buf - } - - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. - str.itl.dequeue() - } - if str.itl.isEmpty() { - str.state = empty - } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. - if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { - return false, err - } - if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, err - } - } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. - str.state = waitingOnStreamQuota - } else { // Otherwise add it back to the list of active streams. - l.activeStreams.enqueue(str) - } - return false, nil -} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go deleted file mode 100644 index 9fa306b2e0..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "math" - "time" -) - -const ( - // The default value of flow control window size in HTTP2 spec. - defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - infinity = time.Duration(math.MaxInt64) - defaultClientKeepaliveTime = infinity - defaultClientKeepaliveTimeout = 20 * time.Second - defaultMaxStreamsClient = 100 - defaultMaxConnectionIdle = infinity - defaultMaxConnectionAge = infinity - defaultMaxConnectionAgeGrace = infinity - defaultServerKeepaliveTime = 2 * time.Hour - defaultServerKeepaliveTimeout = 20 * time.Second - defaultKeepalivePolicyMinTime = 5 * time.Minute - // max window limit set by HTTP2 Specs. - maxWindowSize = math.MaxInt32 - // defaultWriteQuota is the default value for number of data - // bytes that each stream can schedule before some of it being - // flushed out. - defaultWriteQuota = 64 * 1024 - defaultClientMaxHeaderListSize = uint32(16 << 20) - defaultServerMaxHeaderListSize = uint32(16 << 20) -) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go deleted file mode 100644 index 5ea997a7e4..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ /dev/null @@ -1,218 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "fmt" - "math" - "sync" - "sync/atomic" -) - -// writeQuota is a soft limit on the amount of data a stream can -// schedule before some of it is written out. -type writeQuota struct { - quota int32 - // get waits on read from when quota goes less than or equal to zero. - // replenish writes on it when quota goes positive again. - ch chan struct{} - // done is triggered in error case. - done <-chan struct{} - // replenish is called by loopyWriter to give quota back to. - // It is implemented as a field so that it can be updated - // by tests. 
- replenish func(n int) -} - -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } - w.replenish = w.realReplenish - return w -} - -func (w *writeQuota) get(sz int32) error { - for { - if atomic.LoadInt32(&w.quota) > 0 { - atomic.AddInt32(&w.quota, -sz) - return nil - } - select { - case <-w.ch: - continue - case <-w.done: - return errStreamDone - } - } -} - -func (w *writeQuota) realReplenish(n int) { - sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { - select { - case w.ch <- struct{}{}: - default: - } - } -} - -type trInFlow struct { - limit uint32 - unacked uint32 - effectiveWindowSize uint32 -} - -func (f *trInFlow) newLimit(n uint32) uint32 { - d := n - f.limit - f.limit = n - f.updateEffectiveWindowSize() - return d -} - -func (f *trInFlow) onData(n uint32) uint32 { - f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 - f.updateEffectiveWindowSize() - return w - } - f.updateEffectiveWindowSize() - return 0 -} - -func (f *trInFlow) reset() uint32 { - w := f.unacked - f.unacked = 0 - f.updateEffectiveWindowSize() - return w -} - -func (f *trInFlow) updateEffectiveWindowSize() { - atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) -} - -func (f *trInFlow) getSize() uint32 { - return atomic.LoadUint32(&f.effectiveWindowSize) -} - -// TODO(mmukhi): Simplify this code. -// inFlow deals with inbound flow control -type inFlow struct { - mu sync.Mutex - // The inbound flow control limit for pending data. - limit uint32 - // pendingData is the overall data which have been received but not been - // consumed by applications. - pendingData uint32 - // The amount of data the application has consumed but grpc has not sent - // window update for them. Used to reduce window update frequency. - pendingUpdate uint32 - // delta is the extra window update given by receiver when an application - // is reading data bigger in size than the inFlow limit. - delta uint32 -} - -// newLimit updates the inflow window to a new value n. -// It assumes that n is always greater than the old limit. -func (f *inFlow) newLimit(n uint32) uint32 { - f.mu.Lock() - d := n - f.limit - f.limit = n - f.mu.Unlock() - return d -} - -func (f *inFlow) maybeAdjust(n uint32) uint32 { - if n > uint32(math.MaxInt32) { - n = uint32(math.MaxInt32) - } - f.mu.Lock() - // estSenderQuota is the receiver's view of the maximum number of bytes the sender - // can send without a window update. - estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) - // estUntransmittedData is the maximum number of bytes the sends might not have put - // on the wire yet. A value of 0 or less means that we have already received all or - // more bytes than the application is requesting to read. - estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. - // This implies that unless we send a window update, the sender won't be able to send all the bytes - // for this message. Therefore we must send an update over the limit since there's an active read - // request from the application. - if estUntransmittedData > estSenderQuota { - // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. - if f.limit+n > maxWindowSize { - f.delta = maxWindowSize - f.limit - } else { - // Send a window update for the whole message and not just the difference between - // estUntransmittedData and estSenderQuota. 
This will be helpful in case the message - // is padded; We will fallback on the current available window(at least a 1/4th of the limit). - f.delta = n - } - f.mu.Unlock() - return f.delta - } - f.mu.Unlock() - return 0 -} - -// onData is invoked when some data frame is received. It updates pendingData. -func (f *inFlow) onData(n uint32) error { - f.mu.Lock() - f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit+f.delta { - limit := f.limit - rcvd := f.pendingData + f.pendingUpdate - f.mu.Unlock() - return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) - } - f.mu.Unlock() - return nil -} - -// onRead is invoked when the application reads the data. It returns the window size -// to be sent to the peer. -func (f *inFlow) onRead(n uint32) uint32 { - f.mu.Lock() - if f.pendingData == 0 { - f.mu.Unlock() - return 0 - } - f.pendingData -= n - if n > f.delta { - n -= f.delta - f.delta = 0 - } else { - f.delta -= n - n = 0 - } - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - wu := f.pendingUpdate - f.pendingUpdate = 0 - f.mu.Unlock() - return wu - } - f.mu.Unlock() - return 0 -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go deleted file mode 100644 index 73b41ea7e0..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ /dev/null @@ -1,449 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file is the implementation of a gRPC server using HTTP/2 which -// uses the standard Go http2 Server implementation (via the -// http.Handler interface), rather than speaking low-level HTTP/2 -// frames itself. It is the implementation of *grpc.Server.ServeHTTP. - -package transport - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "sync" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/http2" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") - } - if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") - } - contentType := r.Header.Get("Content-Type") - // TODO: do we assume contentType is lowercase? 
we did before - contentSubtype, validContentType := contentSubtype(contentType) - if !validContentType { - return nil, errors.New("invalid gRPC request content-type") - } - if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") - } - if _, ok := w.(http.CloseNotifier); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") - } - - st := &serverHandlerTransport{ - rw: w, - req: r, - closedCh: make(chan struct{}), - writes: make(chan func()), - contentType: contentType, - contentSubtype: contentSubtype, - stats: stats, - } - - if v := r.Header.Get("grpc-timeout"); v != "" { - to, err := decodeTimeout(v) - if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) - } - st.timeoutSet = true - st.timeout = to - } - - metakv := []string{"content-type", contentType} - if r.Host != "" { - metakv = append(metakv, ":authority", r.Host) - } - for k, vv := range r.Header { - k = strings.ToLower(k) - if isReservedHeader(k) && !isWhitelistedHeader(k) { - continue - } - for _, v := range vv { - v, err := decodeMetadataHeader(k, v) - if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) - } - metakv = append(metakv, k, v) - } - } - st.headerMD = metadata.Pairs(metakv...) - - return st, nil -} - -// serverHandlerTransport is an implementation of ServerTransport -// which replies to exactly one gRPC request (exactly one HTTP request), -// using the net/http.Handler interface. This http.Handler is guaranteed -// at this point to be speaking over HTTP/2, so it's able to speak valid -// gRPC. -type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool - - headerMD metadata.MD - - closeOnce sync.Once - closedCh chan struct{} // closed on Close - - // writes is a channel of code to run serialized in the - // ServeHTTP (HandleStreams) goroutine. The channel is closed - // when WriteStatus is called. - writes chan func() - - // block concurrent WriteStatus calls - // e.g. grpc/(*serverStream).SendMsg/RecvMsg - writeStatusMu sync.Mutex - - // we just mirror the request content-type - contentType string - // we store both contentType and contentSubtype so we don't keep recreating them - // TODO make sure this is consistent across handler_server and http2_server - contentSubtype string - - stats stats.Handler -} - -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil -} - -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } - -// strAddr is a net.Addr backed by either a TCP "ip:port" string, or -// the empty string if unknown. -type strAddr string - -func (a strAddr) Network() string { - if a != "" { - // Per the documentation on net/http.Request.RemoteAddr, if this is - // set, it's set to the IP:port of the peer (hence, TCP): - // https://golang.org/pkg/net/http/#Request - // - // If we want to support Unix sockets later, we can - // add our own grpc-specific convention within the - // grpc codebase to set RemoteAddr to a different - // format, or probably better: we can attach it to the - // context and use that from serverHandlerTransport.RemoteAddr. 
- return "tcp" - } - return "" -} - -func (a strAddr) String() string { return string(a) } - -// do runs fn in the ServeHTTP goroutine. -func (ht *serverHandlerTransport) do(fn func()) error { - // Avoid a panic writing to closed channel. Imperfect but maybe good enough. - select { - case <-ht.closedCh: - return ErrConnClosing - default: - select { - case ht.writes <- fn: - return nil - case <-ht.closedCh: - return ErrConnClosing - } - } -} - -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { - ht.writeStatusMu.Lock() - defer ht.writeStatusMu.Unlock() - - err := ht.do(func() { - ht.writeCommonHeaders(s) - - // And flush, in case no header or body has been sent yet. - // This forces a separation of headers and trailers if this is the - // first call (for example, in end2end tests's TestNoService). - ht.rw.(http.Flusher).Flush() - - h := ht.rw.Header() - h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) - if m := st.Message(); m != "" { - h.Set("Grpc-Message", encodeGrpcMessage(m)) - } - - if p := st.Proto(); p != nil && len(p.Details) > 0 { - stBytes, err := proto.Marshal(p) - if err != nil { - // TODO: return error instead, when callers are able to handle it. - panic(err) - } - - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) - } - - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - // http2 ResponseWriter mechanism to send undeclared Trailers after - // the headers have possibly been written. - h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) - } - } - } - }) - - if err == nil { // transport has not been closed - if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) - } - close(ht.writes) - } - ht.Close() - return err -} - -// writeCommonHeaders sets common headers on the first write -// call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - - h := ht.rw.Header() - h["Date"] = nil // suppress Date to make tests happy; TODO: restore - h.Set("Content-Type", ht.contentType) - - // Predeclare trailers we'll set later in WriteStatus (after the body). - // This is a SHOULD in the HTTP RFC, and the way you add (known) - // Trailers per the net/http.ResponseWriter contract. - // See https://golang.org/pkg/net/http/#ResponseWriter - // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers - h.Add("Trailer", "Grpc-Status") - h.Add("Trailer", "Grpc-Message") - h.Add("Trailer", "Grpc-Status-Details-Bin") - - if s.sendCompress != "" { - h.Set("Grpc-Encoding", s.sendCompress) - } -} - -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - ht.rw.Write(hdr) - ht.rw.Write(data) - ht.rw.(http.Flusher).Flush() - }) -} - -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { - err := ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
- if isReservedHeader(k) { - continue - } - for _, v := range vv { - v = encodeMetadataHeader(k, v) - h.Add(k, v) - } - } - ht.rw.WriteHeader(200) - ht.rw.(http.Flusher).Flush() - }) - - if err == nil { - if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) - } - } - return err -} - -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { - // With this transport type there will be exactly 1 stream: this HTTP request. - - ctx := ht.req.Context() - var cancel context.CancelFunc - if ht.timeoutSet { - ctx, cancel = context.WithTimeout(ctx, ht.timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - - // requestOver is closed when either the request's context is done - // or the status has been written via WriteStatus. - requestOver := make(chan struct{}) - - // clientGone receives a single value if peer is gone, either - // because the underlying connection is dead or because the - // peer sends an http2 RST_STREAM. - clientGone := ht.rw.(http.CloseNotifier).CloseNotify() - go func() { - select { - case <-requestOver: - case <-ht.closedCh: - case <-clientGone: - } - cancel() - ht.Close() - }() - - req := ht.req - - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - ht.stats.HandleRPC(s.ctx, inHeader) - } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, - } - - // readerDone is closed when the Body.Read-ing goroutine exits. - readerDone := make(chan struct{}) - go func() { - defer close(readerDone) - - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) - if n > 0 { - s.buf.put(recvMsg{data: buf[:n:n]}) - buf = buf[n:] - } - if err != nil { - s.buf.put(recvMsg{err: mapRecvMsgError(err)}) - return - } - if len(buf) == 0 { - buf = make([]byte, readSize) - } - } - }() - - // startStream is provided by the *grpc.Server's serveStreams. - // It starts a goroutine serving s and exits immediately. - // The goroutine that is started is the one that then calls - // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. - startStream(s) - - ht.runStream() - close(requestOver) - - // Wait for reading goroutine to finish. - req.Body.Close() - <-readerDone -} - -func (ht *serverHandlerTransport) runStream() { - for { - select { - case fn, ok := <-ht.writes: - if !ok { - return - } - fn() - case <-ht.closedCh: - return - } - } -} - -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} - -func (ht *serverHandlerTransport) Drain() { - panic("Drain() is not implemented") -} - -// mapRecvMsgError returns the non-nil err into the appropriate -// error value as expected by callers of *grpc.parser.recvMsg. 
-// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package -func mapRecvMsgError(err error) error { - if err == io.EOF || err == io.ErrUnexpectedEOF { - return err - } - if se, ok := err.(http2.StreamError); ok { - if code, ok := http2ErrConvTab[se.Code]; ok { - return status.Error(code, se.Error()) - } - } - if strings.Contains(err.Error(), "body closed by handler") { - return status.Error(codes.Canceled, err.Error()) - } - return connectionErrorf(true, err, err.Error()) -} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go deleted file mode 100644 index babcaee501..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ /dev/null @@ -1,1380 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "context" - "fmt" - "io" - "math" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/syscall" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -// http2Client implements the ClientTransport interface with HTTP2. -type http2Client struct { - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string - md interface{} - conn net.Conn // underlying communication channel - loopy *loopyWriter - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) - // that the server sent GoAway on this transport. - goAway chan struct{} - // awakenKeepalive is used to wake up keepalive when after it has gone dormant. - awakenKeepalive chan struct{} - - framer *framer - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *controlBuffer - fc *trInFlow - // The scheme used: https if TLS is on, http otherwise. - scheme string - - isSecure bool - - perRPCCreds []credentials.PerRPCCredentials - - // Boolean to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. 
- kp keepalive.ClientParameters - keepaliveEnabled bool - - statsHandler stats.Handler - - initialWindowSize int32 - - // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE - maxSendHeaderListSize *uint32 - - bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() - - maxConcurrentStreams uint32 - streamQuota int64 - streamsQuotaAvailable chan struct{} - waitingStreams uint32 - nextID uint32 - - mu sync.Mutex // guard the following variables - state transportState - activeStreams map[uint32]*Stream - // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. - prevGoAwayID uint32 - // goAwayReason records the http2.ErrCode and debug data received with the - // GoAway frame. - goAwayReason GoAwayReason - - // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number - czData *channelzData - - onGoAway func(GoAwayReason) - onClose func() -} - -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { - if fn != nil { - return fn(ctx, addr) - } - return (&net.Dialer{}).DialContext(ctx, "tcp", addr) -} - -func isTemporary(err error) bool { - switch err := err.(type) { - case interface { - Temporary() bool - }: - return err.Temporary() - case interface { - Timeout() bool - }: - // Timeouts may be resolved upon retry, and are thus treated as - // temporary. - return err.Timeout() - } - return true -} - -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 -// and starts to receive messages on it. Non-nil error returns if construction -// fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { - scheme := "http" - ctx, cancel := context.WithCancel(ctx) - defer func() { - if err != nil { - cancel() - } - }() - - conn, err := dial(connectCtx, opts.Dialer, addr.Addr) - if err != nil { - if opts.FailOnNonTempDialError { - return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) - } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) - } - // Any further errors will close the underlying connection - defer func(conn net.Conn) { - if err != nil { - conn.Close() - } - }(conn) - kp := opts.KeepaliveParams - // Validate keepalive parameters. 
- if kp.Time == 0 { - kp.Time = defaultClientKeepaliveTime - } - if kp.Timeout == 0 { - kp.Timeout = defaultClientKeepaliveTimeout - } - keepaliveEnabled := false - if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { - return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) - } - keepaliveEnabled = true - } - var ( - isSecure bool - authInfo credentials.AuthInfo - ) - transportCreds := opts.TransportCredentials - perRPCCreds := opts.PerRPCCredentials - - if b := opts.CredsBundle; b != nil { - if t := b.TransportCredentials(); t != nil { - transportCreds = t - } - if t := b.PerRPCCredentials(); t != nil { - perRPCCreds = append(perRPCCreds, t) - } - } - if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) - if err != nil { - return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) - } - isSecure = true - } - dynamicWindow := true - icwz := int32(initialWindowSize) - if opts.InitialConnWindowSize >= defaultWindowSize { - icwz = opts.InitialConnWindowSize - dynamicWindow = false - } - writeBufSize := opts.WriteBufferSize - readBufSize := opts.ReadBufferSize - maxHeaderListSize := defaultClientMaxHeaderListSize - if opts.MaxHeaderListSize != nil { - maxHeaderListSize = *opts.MaxHeaderListSize - } - t := &http2Client{ - ctx: ctx, - ctxDone: ctx.Done(), // Cache Done chan. - cancel: cancel, - userAgent: opts.UserAgent, - md: addr.Metadata, - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, - readerDone: make(chan struct{}), - writerDone: make(chan struct{}), - goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), - fc: &trInFlow{limit: uint32(icwz)}, - scheme: scheme, - activeStreams: make(map[uint32]*Stream), - isSecure: isSecure, - perRPCCreds: perRPCCreds, - kp: kp, - statsHandler: opts.StatsHandler, - initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, - nextID: 1, - maxConcurrentStreams: defaultMaxStreamsClient, - streamQuota: defaultMaxStreamsClient, - streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, - keepaliveEnabled: keepaliveEnabled, - } - t.controlBuf = newControlBuffer(t.ctxDone) - if opts.InitialWindowSize >= defaultWindowSize { - t.initialWindowSize = opts.InitialWindowSize - dynamicWindow = false - } - if dynamicWindow { - t.bdpEst = &bdpEstimator{ - bdp: initialWindowSize, - updateFlowControl: t.updateFlowControl, - } - } - // Make sure awakenKeepalive can't be written upon. - // keepalive routine will make it writable, if need be. - t.awakenKeepalive <- struct{}{} - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{ - Client: true, - } - t.statsHandler.HandleConn(t.ctx, connBegin) - } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) - } - if t.keepaliveEnabled { - go t.keepalive() - } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. 
- go t.reader() - - // Send connection preface to server. - n, err := t.conn.Write(clientPreface) - if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - } - if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - var ss []http2.Setting - - if t.initialWindowSize != defaultWindowSize { - ss = append(ss, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(t.initialWindowSize), - }) - } - if opts.MaxHeaderListSize != nil { - ss = append(ss, http2.Setting{ - ID: http2.SettingMaxHeaderListSize, - Val: *opts.MaxHeaderListSize, - }) - } - err = t.framer.fr.WriteSettings(ss...) - if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(icwz - defaultWindowSize); delta > 0 { - if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) - } - } - - t.framer.writer.Flush() - go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) - } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } - close(t.writerDone) - }() - return t, nil -} - -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } - // The client side stream context should have exactly the same life cycle with the user provided context. - // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. - // So we use the original context here instead of creating a copy. - s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - t.CloseStream(s, err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) - }, - } - return s -} - -func (t *http2Client) getPeer() *peer.Peer { - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - return pr -} - -func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { - aud := t.createAudience(callHdr) - authData, err := t.getTrAuthData(ctx, aud) - if err != nil { - return nil, err - } - callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) - if err != nil { - return nil, err - } - // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - // Make the slice of certain predictable size to reduce allocations made by append. 
- hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te - hfLen += len(authData) + len(callAuthData) - headerFields := make([]hpack.HeaderField, 0, hfLen) - headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) - headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) - headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) - if callHdr.PreviousAttempts > 0 { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) - } - - if callHdr.SendCompress != "" { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - } - if dl, ok := ctx.Deadline(); ok { - // Send out timeout regardless its value. The server can detect timeout context by itself. - // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. - timeout := dl.Sub(time.Now()) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) - } - for k, v := range authData { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - for k, v := range callAuthData { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } - - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { - var k string - for _, vv := range added { - for i, v := range vv { - if i%2 == 0 { - k = v - continue - } - // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. - if isReservedHeader(k) { - continue - } - headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) - } - } - for k, vv := range md { - // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - } - if md, ok := t.md.(*metadata.MD); ok { - for k, vv := range *md { - if isReservedHeader(k) { - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - } - return headerFields, nil -} - -func (t *http2Client) createAudience(callHdr *CallHdr) string { - // Create an audience string only if needed. - if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { - return "" - } - // Construct URI required to get auth request metadata. - // Omit port if it is the default one. 
- host := strings.TrimSuffix(callHdr.Host, ":443") - pos := strings.LastIndex(callHdr.Method, "/") - if pos == -1 { - pos = len(callHdr.Method) - } - return "https://" + host + callHdr.Method[:pos] -} - -func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { - authData := map[string]string{} - for _, c := range t.perRPCCreds { - data, err := c.GetRequestMetadata(ctx, audience) - if err != nil { - if _, ok := status.FromError(err); ok { - return nil, err - } - - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) - } - for k, v := range data { - // Capital header names are illegal in HTTP/2. - k = strings.ToLower(k) - authData[k] = v - } - } - return authData, nil -} - -func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { - callAuthData := map[string]string{} - // Check if credentials.PerRPCCredentials were provided via call options. - // Note: if these credentials are provided both via dial options and call - // options, then both sets of credentials will be applied. - if callCreds := callHdr.Creds; callCreds != nil { - if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") - } - data, err := callCreds.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) - } - for k, v := range data { - // Capital header names are illegal in HTTP/2 - k = strings.ToLower(k) - callAuthData[k] = v - } - } - return callAuthData, nil -} - -// NewStream creates a stream and registers it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - ctx = peer.NewContext(ctx, t.getPeer()) - headerFields, err := t.createHeaderFields(ctx, callHdr) - if err != nil { - return nil, err - } - s := t.newStream(ctx, callHdr) - cleanup := func(err error) { - if s.swapState(streamDone) == streamDone { - // If it was already done, return. - return - } - // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) - s.write(recvMsg{err: err}) - close(s.done) - // If headerChan isn't closed, then close it. - if atomic.SwapUint32(&s.headerDone, 1) == 0 { - close(s.headerChan) - } - - } - hdr := &headerFrame{ - hf: headerFields, - endStream: false, - initStream: func(id uint32) (bool, error) { - t.mu.Lock() - if state := t.state; state != reachable { - t.mu.Unlock() - // Do a quick cleanup. - err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return false, err - } - t.activeStreams[id] = s - if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) - } - var sendPing bool - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 && t.keepaliveEnabled { - select { - case t.awakenKeepalive <- struct{}{}: - sendPing = true - // Fill the awakenKeepalive channel again as this channel must be - // kept non-writable except at the point that the keepalive() - // goroutine is waiting either to be awaken or shutdown. 
- t.awakenKeepalive <- struct{}{} - default: - } - } - t.mu.Unlock() - return sendPing, nil - }, - onOrphaned: cleanup, - wq: s.wq, - } - firstTry := true - var ch chan struct{} - checkForStreamQuota := func(it interface{}) bool { - if t.streamQuota <= 0 { // Can go negative if server decreases it. - if firstTry { - t.waitingStreams++ - } - ch = t.streamsQuotaAvailable - return false - } - if !firstTry { - t.waitingStreams-- - } - t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} - if t.streamQuota > 0 && t.waitingStreams > 0 { - select { - case t.streamsQuotaAvailable <- struct{}{}: - default: - } - } - return true - } - var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { - if t.maxSendHeaderListSize == nil { - return true - } - hdrFrame := it.(*headerFrame) - var sz int64 - for _, f := range hdrFrame.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) - return false - } - } - return true - } - for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true - }, hdr) - if err != nil { - return nil, err - } - if success { - break - } - if hdrListSizeErr != nil { - return nil, hdrListSizeErr - } - firstTry = false - select { - case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) - case <-t.goAway: - return nil, errStreamDrain - case <-t.ctx.Done(): - return nil, ErrConnClosing - } - } - if t.statsHandler != nil { - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - } - t.statsHandler.HandleRPC(s.ctx, outHeader) - } - return s, nil -} - -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { - // Set stream status to done. - if s.swapState(streamDone) == streamDone { - // If it was already done, return. If multiple closeStream calls - // happen simultaneously, wait for the first to finish. - <-s.done - return - } - // status and trailers can be updated here without any synchronization because the stream goroutine will - // only read it after it sees an io.EOF error from read or write and we'll write those errors - // only after updating this. - s.status = st - if len(mdata) > 0 { - s.trailer = mdata - } - if err != nil { - // This will unblock reads eventually. - s.write(recvMsg{err: err}) - } - // If headerChan isn't closed, then close it. 
- if atomic.SwapUint32(&s.headerDone, 1) == 0 { - s.noHeaders = true - close(s.headerChan) - } - cleanup := &cleanupStream{ - streamID: s.id, - onWrite: func() { - t.mu.Lock() - if t.activeStreams != nil { - delete(t.activeStreams, s.id) - } - t.mu.Unlock() - if channelz.IsOn() { - if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) - } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) - } - } - }, - rst: rst, - rstCode: rstCode, - } - addBackStreamQuota := func(interface{}) bool { - t.streamQuota++ - if t.streamQuota > 0 && t.waitingStreams > 0 { - select { - case t.streamsQuotaAvailable <- struct{}{}: - default: - } - } - return true - } - t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) - // This will unblock write. - close(s.done) -} - -// Close kicks off the shutdown process of the transport. This should be called -// only once on a transport. Once it is called, the transport should not be -// accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { - t.mu.Lock() - // Make sure we only Close once. - if t.state == closing { - t.mu.Unlock() - return nil - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - t.controlBuf.finish() - t.cancel() - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } - // Notify all active streams. - for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) - } - if t.statsHandler != nil { - connEnd := &stats.ConnEnd{ - Client: true, - } - t.statsHandler.HandleConn(t.ctx, connEnd) - } - t.onClose() - return err -} - -// GracefulClose sets the state to draining, which prevents new streams from -// being created and causes the transport to be closed when the last active -// stream is closed. If there are no active streams, the transport is closed -// immediately. This does nothing if the transport is already draining or -// closing. -func (t *http2Client) GracefulClose() error { - t.mu.Lock() - // Make sure we move to draining only from active. - if t.state == draining || t.state == closing { - t.mu.Unlock() - return nil - } - t.state = draining - active := len(t.activeStreams) - t.mu.Unlock() - if active == 0 { - return t.Close() - } - t.controlBuf.put(&incomingGoAway{}) - return nil -} - -// Write formats the data into HTTP2 data frame(s) and sends it out. The caller -// should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - if opts.Last { - // If it's the last message, update stream state. - if !s.compareAndSwapState(streamActive, streamWriteDone) { - return errStreamDone - } - } else if s.getState() != streamActive { - return errStreamDone - } - df := &dataFrame{ - streamID: s.id, - endStream: opts.Last, - } - if hdr != nil || data != nil { // If it's not an empty data frame. - // Add some data to grpc message header so that we can equally - // distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df.h, df.d = hdr, data - // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. 
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - return err - } - } - return t.controlBuf.put(df) -} - -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok -} - -// adjustWindow sends out extra window update over the initial window size -// of stream if the application is requesting data larger in size than -// the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { - if w := s.fc.maybeAdjust(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) - } -} - -// updateWindow adjusts the inbound quota for the stream. -// Window updates will be sent out when the cumulative quota -// exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) - } -} - -// updateFlowControl updates the incoming flow control windows -// for the transport and the stream based on the current bdp -// estimation. -func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() - updateIWS := func(interface{}) bool { - t.initialWindowSize = int32(n) - return true - } - t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) - t.controlBuf.put(&outgoingSettings{ - ss: []http2.Setting{ - { - ID: http2.SettingInitialWindowSize, - Val: n, - }, - }, - }) -} - -func (t *http2Client) handleData(f *http2.DataFrame) { - size := f.Header().Length - var sendBDPPing bool - if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(size) - } - // Decouple connection's flow control from application's read. - // An update on connection's flow control should not depend on - // whether user application has read the data or not. Such a - // restriction is already imposed on the stream's flow control, - // and therefore the sender will be blocked anyways. - // Decoupling the connection flow control will prevent other - // active(fast) streams from starving in presence of slow or - // inactive streams. - // - if w := t.fc.onData(size); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - if sendBDPPing { - // Avoid excessive ping detection (e.g. in an L7 proxy) - // by sending a window update prior to the BDP ping. - - if w := t.fc.reset(); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - - t.controlBuf.put(bdpPing) - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - return - } - if size > 0 { - if err := s.fc.onData(size); err != nil { - t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) - return - } - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) - } - } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - data := make([]byte, len(f.Data())) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - } - // The server has closed the stream without sending trailers. Record that - // the read direction is closed, and set the status appropriately. 
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) - } -} - -func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - if f.ErrCode == http2.ErrCodeRefusedStream { - // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) - } - statusCode, ok := http2ErrConvTab[f.ErrCode] - if !ok { - warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) - statusCode = codes.Unknown - } - if statusCode == codes.Canceled { - // Our deadline was already exceeded, and that was likely the cause of - // this cancelation. Alter the status code accordingly. - if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { - statusCode = codes.DeadlineExceeded - } - } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) -} - -func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { - if f.IsAck() { - return - } - var maxStreams *uint32 - var ss []http2.Setting - var updateFuncs []func() - f.ForeachSetting(func(s http2.Setting) error { - switch s.ID { - case http2.SettingMaxConcurrentStreams: - maxStreams = new(uint32) - *maxStreams = s.Val - case http2.SettingMaxHeaderListSize: - updateFuncs = append(updateFuncs, func() { - t.maxSendHeaderListSize = new(uint32) - *t.maxSendHeaderListSize = s.Val - }) - default: - ss = append(ss, s) - } - return nil - }) - if isFirst && maxStreams == nil { - maxStreams = new(uint32) - *maxStreams = math.MaxUint32 - } - sf := &incomingSettings{ - ss: ss, - } - if maxStreams != nil { - updateStreamQuota := func() { - delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) - t.maxConcurrentStreams = *maxStreams - t.streamQuota += delta - if delta > 0 && t.waitingStreams > 0 { - close(t.streamsQuotaAvailable) // wake all of them up. - t.streamsQuotaAvailable = make(chan struct{}, 1) - } - } - updateFuncs = append(updateFuncs, updateStreamQuota) - } - t.controlBuf.executeAndPut(func(interface{}) bool { - for _, f := range updateFuncs { - f() - } - return true - }, sf) -} - -func (t *http2Client) handlePing(f *http2.PingFrame) { - if f.IsAck() { - // Maybe it's a BDP ping. - if t.bdpEst != nil { - t.bdpEst.calculate(f.Data) - } - return - } - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return - } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } - id := f.LastStreamID - if id > 0 && id%2 != 1 { - t.mu.Unlock() - t.Close() - return - } - // A client can receive multiple GoAways from the server (see - // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first - // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be - // sent after an RTT delay with the ID of the last stream the server will - // process. - // - // Therefore, when we get the first GoAway we don't necessarily close any - // streams. While in case of second GoAway we close all streams created after - // the GoAwayId. This way streams that were in-flight while the GoAway from - // server was being sent don't get killed. 
- select { - case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). - // If there are multiple GoAways the first one should always have an ID greater than the following ones. - if id > t.prevGoAwayID { - t.mu.Unlock() - t.Close() - return - } - default: - t.setGoAwayReason(f) - close(t.goAway) - t.state = draining - t.controlBuf.put(&incomingGoAway{}) - - // This has to be a new goroutine because we're still using the current goroutine to read in the transport. - t.onGoAway(t.goAwayReason) - } - // All streams with IDs greater than the GoAwayId - // and smaller than the previous GoAway ID should be killed. - upperLimit := t.prevGoAwayID - if upperLimit == 0 { // This is the first GoAway Frame. - upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. - } - for streamID, stream := range t.activeStreams { - if streamID > id && streamID <= upperLimit { - // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) - } - } - t.prevGoAwayID = id - active := len(t.activeStreams) - t.mu.Unlock() - if active == 0 { - t.Close() - } -} - -// setGoAwayReason sets the value of t.goAwayReason based -// on the GoAway frame received. -// It expects a lock on transport's mutext to be held by -// the caller. -func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { - t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: - if string(f.DebugData()) == "too_many_pings" { - t.goAwayReason = GoAwayTooManyPings - } - } -} - -func (t *http2Client) GetGoAwayReason() GoAwayReason { - t.mu.Lock() - defer t.mu.Unlock() - return t.goAwayReason -} - -func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { - t.controlBuf.put(&incomingWindowUpdate{ - streamID: f.Header().StreamID, - increment: f.Increment, - }) -} - -// operateHeaders takes action on the decoded headers. -func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { - return - } - atomic.StoreUint32(&s.bytesReceived, 1) - var state decodeState - if err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false) - // Something wrong. Stops reading even when there is remaining. - return - } - - endStream := frame.StreamEnded() - var isHeader bool - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) - } - } - }() - // If headers haven't been received yet. - if atomic.SwapUint32(&s.headerDone, 1) == 0 { - if !endStream { - // Headers frame is not actually a trailers-only frame. - isHeader = true - // These values can be set without any synchronization because - // stream goroutine will read it only after seeing a closed - // headerChan which we'll close after setting this. 
- s.recvCompress = state.encoding - if len(state.mdata) > 0 { - s.header = state.mdata - } - } else { - s.noHeaders = true - } - close(s.headerChan) - } - if !endStream { - return - } - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true) -} - -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. - frame, err := t.framer.fr.ReadFrame() - if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - t.Close() // this kicks off resetTransport, so must be last before return - return - } - t.onPrefaceReceipt() - t.handleSettings(sf, true) - - // loop to keep reading incoming messages on this transport. - for { - frame, err := t.framer.fr.ReadFrame() - if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) - } - if err != nil { - // Abort an active stream if the http2.Framer returns a - // http2.StreamError. This can happen only if the server's response - // is malformed http2. - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - // use error detail to provide better err message - code := http2ErrConvTab[se.Code] - msg := t.framer.fr.ErrorDetail().Error() - t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) - } - continue - } else { - // Transport error. - t.Close() - return - } - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - t.operateHeaders(frame) - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame, false) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.GoAwayFrame: - t.handleGoAway(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - default: - errorf("transport: http2Client.reader got unhandled frame type %v.", frame) - } - } -} - -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. -func (t *http2Client) keepalive() { - p := &ping{data: [8]byte{}} - timer := time.NewTimer(t.kp.Time) - for { - select { - case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) - continue - } - // Check if keepalive should go dormant. - t.mu.Lock() - if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { - // Make awakenKeepalive writable. - <-t.awakenKeepalive - t.mu.Unlock() - select { - case <-t.awakenKeepalive: - // If the control gets here a ping has been sent - // need to reset the timer with keepalive.Timeout. - case <-t.ctx.Done(): - return - } - } else { - t.mu.Unlock() - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) - } - // Send ping. - t.controlBuf.put(p) - } - - // By the time control gets here a ping has been sent one way or the other. 
- timer.Reset(t.kp.Timeout) - select { - case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) - continue - } - t.Close() - return - case <-t.ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } - case <-t.ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } - } -} - -func (t *http2Client) Error() <-chan struct{} { - return t.ctx.Done() -} - -func (t *http2Client) GoAway() <-chan struct{} { - return t.goAway -} - -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s -} - -func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) -} - -func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) -} - -func (t *http2Client) getOutFlowWindow() int64 { - resp := make(chan uint32, 1) - timer := time.NewTimer(time.Second) - defer timer.Stop() - t.controlBuf.put(&outFlowControlSizeRequest{resp}) - select { - case sz := <-resp: - return int64(sz) - case <-t.ctxDone: - return -1 - case <-timer.C: - return -2 - } -} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go deleted file mode 100644 index df2740398b..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ /dev/null @@ -1,1180 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package transport - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - "net" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/tap" -) - -var ( - // ErrIllegalHeaderWrite indicates that setting header is illegal because of - // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") - // ErrHeaderListSizeLimitViolation indicates that the header list size is larger - // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") -) - -// http2Server implements the ServerTransport interface with HTTP2. -type http2Server struct { - ctx context.Context - ctxDone <-chan struct{} // Cache the context.Done() chan - cancel context.CancelFunc - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer - // The max number of concurrent streams. - maxStreams uint32 - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *controlBuffer - fc *trInFlow - stats stats.Handler - // Flag to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. - // Keepalive and max-age parameters for the server. - kp keepalive.ServerParameters - - // Keepalive enforcement policy. - kep keepalive.EnforcementPolicy - // The time instance last ping was received. - lastPingAt time.Time - // Number of times the client has violated keepalive ping policy so far. - pingStrikes uint8 - // Flag to signify that number of ping strikes should be reset to 0. - // This is set whenever data or header frames are sent. - // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. - initialWindowSize int32 - bdpEst *bdpEstimator - maxSendHeaderListSize *uint32 - - mu sync.Mutex // guard the following - - // drainChan is initialized when drain(...) is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} - state transportState - activeStreams map[uint32]*Stream - // idle is the time instant when the connection went idle. - // This is either the beginning of the connection or when the number of - // RPCs go down to 0. - // When the connection is busy, this value is set to 0. - idle time.Time - - // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number - czData *channelzData -} - -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - writeBufSize := config.WriteBufferSize - readBufSize := config.ReadBufferSize - maxHeaderListSize := defaultServerMaxHeaderListSize - if config.MaxHeaderListSize != nil { - maxHeaderListSize = *config.MaxHeaderListSize - } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) - // Send initial settings as connection preface to client. - var isettings []http2.Setting - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, - }) - } - dynamicWindow := true - iwz := int32(initialWindowSize) - if config.InitialWindowSize >= defaultWindowSize { - iwz = config.InitialWindowSize - dynamicWindow = false - } - icwz := int32(initialWindowSize) - if config.InitialConnWindowSize >= defaultWindowSize { - icwz = config.InitialConnWindowSize - dynamicWindow = false - } - if iwz != defaultWindowSize { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(iwz)}) - } - if config.MaxHeaderListSize != nil { - isettings = append(isettings, http2.Setting{ - ID: http2.SettingMaxHeaderListSize, - Val: *config.MaxHeaderListSize, - }) - } - if err := framer.fr.WriteSettings(isettings...); err != nil { - return nil, connectionErrorf(false, err, "transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(icwz - defaultWindowSize); delta > 0 { - if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { - return nil, connectionErrorf(false, err, "transport: %v", err) - } - } - kp := config.KeepaliveParams - if kp.MaxConnectionIdle == 0 { - kp.MaxConnectionIdle = defaultMaxConnectionIdle - } - if kp.MaxConnectionAge == 0 { - kp.MaxConnectionAge = defaultMaxConnectionAge - } - // Add a jitter to MaxConnectionAge. 
- kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) - if kp.MaxConnectionAgeGrace == 0 { - kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace - } - if kp.Time == 0 { - kp.Time = defaultServerKeepaliveTime - } - if kp.Timeout == 0 { - kp.Timeout = defaultServerKeepaliveTimeout - } - kep := config.KeepalivePolicy - if kep.MinTime == 0 { - kep.MinTime = defaultKeepalivePolicyMinTime - } - ctx, cancel := context.WithCancel(context.Background()) - t := &http2Server{ - ctx: ctx, - cancel: cancel, - ctxDone: ctx.Done(), - conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, - framer: framer, - readerDone: make(chan struct{}), - writerDone: make(chan struct{}), - maxStreams: maxStreams, - inTapHandle: config.InTapHandle, - fc: &trInFlow{limit: uint32(icwz)}, - state: reachable, - activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, - kp: kp, - idle: time.Now(), - kep: kep, - initialWindowSize: iwz, - czData: new(channelzData), - } - t.controlBuf = newControlBuffer(t.ctxDone) - if dynamicWindow { - t.bdpEst = &bdpEstimator{ - bdp: initialWindowSize, - updateFlowControl: t.updateFlowControl, - } - } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) - } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) - } - t.framer.writer.Flush() - - defer func() { - if err != nil { - t.Close() - } - }() - - // Check the validity of client preface. - preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - } - if !bytes.Equal(preface, clientPreface) { - return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - } - - frame, err := t.framer.fr.ReadFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - return nil, err - } - if err != nil { - return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) - } - atomic.StoreUint32(&t.activity, 1) - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - } - t.handleSettings(sf) - - go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - errorf("transport: loopyWriter.run returning. Err: %v", err) - } - t.conn.Close() - close(t.writerDone) - }() - go t.keepalive() - return t, nil -} - -// operateHeader takes action on the decoded headers. 
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { - streamID := frame.Header().StreamID - state := decodeState{serverSide: true} - if err := state.decodeHeader(frame); err != nil { - if se, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: statusCodeConvTab[se.Code()], - onWrite: func() {}, - }) - } - return false - } - - buf := newRecvBuffer() - s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.encoding, - method: state.method, - contentSubtype: state.contentSubtype, - } - if frame.StreamEnded() { - // s is just created by the caller. No lock needed. - s.state = streamReadDone - } - if state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) - } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) - } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) - // Attach the received metadata to the context. - if len(state.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) - } - if state.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) - } - if state.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) - } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.method, - } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - return false - } - } - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - return false - } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - return false - } - if streamID%2 != 1 || streamID <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. - errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - return true - } - t.maxStreamID = streamID - t.activeStreams[streamID] = s - if len(t.activeStreams) == 1 { - t.idle = time.Time{} - } - t.mu.Unlock() - if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) - } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } - s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - } - t.stats.HandleRPC(s.ctx, inHeader) - } - s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) - }, - } - // Register the stream with loopy. 
- t.controlBuf.put(®isterStream{ - streamID: s.id, - wq: s.wq, - }) - handle(s) - return false -} - -// HandleStreams receives incoming streams using the given handler. This is -// typically run in a separate goroutine. -// traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) - for { - frame, err := t.framer.fr.ReadFrame() - atomic.StoreUint32(&t.activity, 1) - if err != nil { - if se, ok := err.(http2.StreamError); ok { - warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - t.closeStream(s, true, se.Code, nil, false) - } else { - t.controlBuf.put(&cleanupStream{ - streamID: se.StreamID, - rst: true, - rstCode: se.Code, - onWrite: func() {}, - }) - } - continue - } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() - break - } - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - case *http2.GoAwayFrame: - // TODO: Handle GoAway from the client appropriately. - default: - errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) - } - } -} - -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - s, ok := t.activeStreams[f.Header().StreamID] - if !ok { - // The stream is already done. - return nil, false - } - return s, true -} - -// adjustWindow sends out extra window update over the initial window size -// of stream if the application is requesting data larger in size than -// the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { - if w := s.fc.maybeAdjust(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) - } - -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, - increment: w, - }) - } -} - -// updateFlowControl updates the incoming flow control windows -// for the transport and the stream based on the current bdp -// estimation. 
-func (t *http2Server) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.initialWindowSize = int32(n) - t.mu.Unlock() - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: t.fc.newLimit(n), - }) - t.controlBuf.put(&outgoingSettings{ - ss: []http2.Setting{ - { - ID: http2.SettingInitialWindowSize, - Val: n, - }, - }, - }) - -} - -func (t *http2Server) handleData(f *http2.DataFrame) { - size := f.Header().Length - var sendBDPPing bool - if t.bdpEst != nil { - sendBDPPing = t.bdpEst.add(size) - } - // Decouple connection's flow control from application's read. - // An update on connection's flow control should not depend on - // whether user application has read the data or not. Such a - // restriction is already imposed on the stream's flow control, - // and therefore the sender will be blocked anyways. - // Decoupling the connection flow control will prevent other - // active(fast) streams from starving in presence of slow or - // inactive streams. - if w := t.fc.onData(size); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - if sendBDPPing { - // Avoid excessive ping detection (e.g. in an L7 proxy) - // by sending a window update prior to the BDP ping. - if w := t.fc.reset(); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{ - streamID: 0, - increment: w, - }) - } - t.controlBuf.put(bdpPing) - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - return - } - if size > 0 { - if err := s.fc.onData(size); err != nil { - t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) - return - } - if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { - t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) - } - } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - data := make([]byte, len(f.Data())) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - } - if f.Header().Flags.Has(http2.FlagDataEndStream) { - // Received the end of stream from the client. - s.compareAndSwapState(streamActive, streamReadDone) - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - t.closeStream(s, false, 0, nil, false) -} - -func (t *http2Server) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - var updateFuncs []func() - f.ForeachSetting(func(s http2.Setting) error { - switch s.ID { - case http2.SettingMaxHeaderListSize: - updateFuncs = append(updateFuncs, func() { - t.maxSendHeaderListSize = new(uint32) - *t.maxSendHeaderListSize = s.Val - }) - default: - ss = append(ss, s) - } - return nil - }) - t.controlBuf.executeAndPut(func(interface{}) bool { - for _, f := range updateFuncs { - f() - } - return true - }, &incomingSettings{ - ss: ss, - }) -} - -const ( - maxPingStrikes = 2 - defaultPingTimeout = 2 * time.Hour -) - -func (t *http2Server) handlePing(f *http2.PingFrame) { - if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) - return - } - // Maybe it's a BDP ping. 
- if t.bdpEst != nil { - t.bdpEst.calculate(f.Data) - } - return - } - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) - - now := time.Now() - defer func() { - t.lastPingAt = now - }() - // A reset ping strikes means that we don't need to check for policy - // violation for this ping and the pingStrikes counter should be set - // to 0. - if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { - t.pingStrikes = 0 - return - } - t.mu.Lock() - ns := len(t.activeStreams) - t.mu.Unlock() - if ns < 1 && !t.kep.PermitWithoutStream { - // Keepalive shouldn't be active thus, this new ping should - // have come after at least defaultPingTimeout. - if t.lastPingAt.Add(defaultPingTimeout).After(now) { - t.pingStrikes++ - } - } else { - // Check if keepalive policy is respected. - if t.lastPingAt.Add(t.kep.MinTime).After(now) { - t.pingStrikes++ - } - } - - if t.pingStrikes > maxPingStrikes { - // Send goaway and close the connection. - errorf("transport: Got too many pings from the client, closing the connection.") - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) - } -} - -func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { - t.controlBuf.put(&incomingWindowUpdate{ - streamID: f.Header().StreamID, - increment: f.Increment, - }) -} - -func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { - for k, vv := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - continue - } - for _, v := range vv { - headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) - } - } - return headerFields -} - -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { - if t.maxSendHeaderListSize == nil { - return true - } - hdrFrame := it.(*headerFrame) - var sz int64 - for _, f := range hdrFrame.hf { - if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) - return false - } - } - return true -} - -// WriteHeader sends the header metedata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - if md.Len() > 0 { - if s.header.Len() > 0 { - s.header = metadata.Join(s.header, md) - } else { - s.header = md - } - } - if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err - } - s.hdrMu.Unlock() - return nil -} - -func (t *http2Server) writeHeaderLocked(s *Stream) error { - // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. 
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) - if s.sendCompress != "" { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: false, - onWrite: func() { - atomic.StoreUint32(&t.resetPingStrikes, 1) - }, - }) - if !success { - if err != nil { - return err - } - t.closeStream(s, true, http2.ErrCodeInternal, nil, false) - return ErrHeaderListSizeLimitViolation - } - if t.stats != nil { - // Note: WireLength is not set in outHeader. - // TODO(mmukhi): Revisit this later, if needed. - outHeader := &stats.OutHeader{} - t.stats.HandleRPC(s.Context(), outHeader) - } - return nil -} - -// WriteStatus sends stream status to the client and terminates the stream. -// There is no further I/O operations being able to perform on this stream. -// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early -// OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { - if s.getState() == streamDone { - return nil - } - s.hdrMu.Lock() - // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields - // first and create a slice of that exact size. - headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. - if !s.updateHeaderSent() { // No headers have been sent. - if len(s.header) > 0 { // Send a separate header frame. - if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err - } - } else { // Send a trailer only response. - headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) - headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) - } - } - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - - if p := st.Proto(); p != nil && len(p.Details) > 0 { - stBytes, err := proto.Marshal(p) - if err != nil { - // TODO: return error instead, when callers are able to handle it. - grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) - } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) - } - } - - // Attach the trailer metadata. - headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) - trailingHeader := &headerFrame{ - streamID: s.id, - hf: headerFields, - endStream: true, - onWrite: func() { - atomic.StoreUint32(&t.resetPingStrikes, 1) - }, - } - s.hdrMu.Unlock() - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) - if !success { - if err != nil { - return err - } - t.closeStream(s, true, http2.ErrCodeInternal, nil, false) - return ErrHeaderListSizeLimitViolation - } - t.closeStream(s, false, 0, trailingHeader, true) - if t.stats != nil { - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) - } - return nil -} - -// Write converts the data into HTTP2 data frame and sends it out. 
Non-nil error -// is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { - if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) - } - } else { - // Writing headers checks for this condition. - if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) - } - } - // Add some data to header frame so that we can equally distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df := &dataFrame{ - streamID: s.id, - h: hdr, - d: data, - onEachWrite: func() { - atomic.StoreUint32(&t.resetPingStrikes, 1) - }, - } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.ctx.Done(): - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) - } - return t.controlBuf.put(df) -} - -// keepalive running in a separate goroutine does the following: -// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. -// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. -// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. -// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection -// after an additional duration of keepalive.Timeout. -func (t *http2Server) keepalive() { - p := &ping{} - var pingSent bool - maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) - maxAge := time.NewTimer(t.kp.MaxConnectionAge) - keepalive := time.NewTimer(t.kp.Time) - // NOTE: All exit paths of this function should reset their - // respective timers. A failure to do so will cause the - // following clean-up to deadlock and eventually leak. - defer func() { - if !maxIdle.Stop() { - <-maxIdle.C - } - if !maxAge.Stop() { - <-maxAge.C - } - if !keepalive.Stop() { - <-keepalive.C - } - }() - for { - select { - case <-maxIdle.C: - t.mu.Lock() - idle := t.idle - if idle.IsZero() { // The connection is non-idle. - t.mu.Unlock() - maxIdle.Reset(t.kp.MaxConnectionIdle) - continue - } - val := t.kp.MaxConnectionIdle - time.Since(idle) - t.mu.Unlock() - if val <= 0 { - // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. - // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) - // Resetting the timer so that the clean-up doesn't deadlock. - maxIdle.Reset(infinity) - return - } - maxIdle.Reset(val) - case <-maxAge.C: - t.drain(http2.ErrCodeNo, []byte{}) - maxAge.Reset(t.kp.MaxConnectionAgeGrace) - select { - case <-maxAge.C: - // Close the connection after grace period. - t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. - maxAge.Reset(infinity) - case <-t.ctx.Done(): - } - return - case <-keepalive.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - pingSent = false - keepalive.Reset(t.kp.Time) - continue - } - if pingSent { - t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. 
- keepalive.Reset(infinity) - return - } - pingSent = true - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) - } - t.controlBuf.put(p) - keepalive.Reset(t.kp.Timeout) - case <-t.ctx.Done(): - return - } - } -} - -// Close starts shutting down the http2Server transport. -// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This -// could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return errors.New("transport: Close() was already called") - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - t.controlBuf.finish() - t.cancel() - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } - // Cancel all active streams. - for _, s := range streams { - s.cancel() - } - if t.stats != nil { - connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) - } - return err -} - -// closeStream clears the footprint of a stream when the stream is not needed -// any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { - if s.swapState(streamDone) == streamDone { - // If the stream was already done, return. - return - } - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() - cleanup := &cleanupStream{ - streamID: s.id, - rst: rst, - rstCode: rstCode, - onWrite: func() { - t.mu.Lock() - if t.activeStreams != nil { - delete(t.activeStreams, s.id) - if len(t.activeStreams) == 0 { - t.idle = time.Now() - } - } - t.mu.Unlock() - if channelz.IsOn() { - if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) - } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) - } - } - }, - } - if hdr != nil { - hdr.cleanup = cleanup - t.controlBuf.put(hdr) - } else { - t.controlBuf.put(cleanup) - } -} - -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - -func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { - t.mu.Lock() - defer t.mu.Unlock() - if t.drainChan != nil { - return - } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) -} - -var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} - -// Handles outgoing GoAway and returns true if loopy needs to put itself -// in draining mode. -func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { - t.mu.Lock() - if t.state == closing { // TODO(mmukhi): This seems unnecessary. - t.mu.Unlock() - // The transport is closing. - return false, ErrConnClosing - } - sid := t.maxStreamID - if !g.headsUp { - // Stop accepting more streams now. - t.state = draining - if len(t.activeStreams) == 0 { - g.closeConn = true - } - t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { - return false, err - } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. 
- t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") - } - return true, nil - } - t.mu.Unlock() - // For a graceful close, send out a GoAway with stream ID of MaxUInt32, - // Follow that with a ping and wait for the ack to come back or a timer - // to expire. During this time accept new streams since they might have - // originated before the GoAway reaches the client. - // After getting the ack or timer expiration send out another GoAway this - // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { - return false, err - } - if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { - return false, err - } - go func() { - timer := time.NewTimer(time.Minute) - defer timer.Stop() - select { - case <-t.drainChan: - case <-timer.C: - case <-t.ctx.Done(): - return - } - t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) - }() - return false, nil -} - -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s -} - -func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) -} - -func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) -} - -func (t *http2Server) getOutFlowWindow() int64 { - resp := make(chan uint32) - timer := time.NewTimer(time.Second) - defer timer.Stop() - t.controlBuf.put(&outFlowControlSizeRequest{resp}) - select { - case sz := <-resp: - return int64(sz) - case <-t.ctxDone: - return -1 - case <-timer.C: - return -2 - } -} - -func getJitter(v time.Duration) time.Duration { - if v == infinity { - return 0 - } - // Generate a jitter between +/- 10% of the value. - r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r - return time.Duration(j) -} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go deleted file mode 100644 index 77a2cfaaef..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ /dev/null @@ -1,623 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package transport - -import ( - "bufio" - "bytes" - "encoding/base64" - "fmt" - "io" - "math" - "net" - "net/http" - "strconv" - "strings" - "time" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - // http2MaxFrameLen specifies the max length of a HTTP2 frame. - http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues - http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - baseContentType = "application/grpc" -) - -var ( - clientPreface = []byte(http2.ClientPreface) - http2ErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.ResourceExhausted, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeStreamClosed: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, - http2.ErrCodeHTTP11Required: codes.Internal, - } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } - httpStatusConvTab = map[int]codes.Code{ - // 400 Bad Request - INTERNAL. - http.StatusBadRequest: codes.Internal, - // 401 Unauthorized - UNAUTHENTICATED. - http.StatusUnauthorized: codes.Unauthenticated, - // 403 Forbidden - PERMISSION_DENIED. - http.StatusForbidden: codes.PermissionDenied, - // 404 Not Found - UNIMPLEMENTED. - http.StatusNotFound: codes.Unimplemented, - // 429 Too Many Requests - UNAVAILABLE. - http.StatusTooManyRequests: codes.Unavailable, - // 502 Bad Gateway - UNAVAILABLE. - http.StatusBadGateway: codes.Unavailable, - // 503 Service Unavailable - UNAVAILABLE. - http.StatusServiceUnavailable: codes.Unavailable, - // 504 Gateway timeout - UNAVAILABLE. - http.StatusGatewayTimeout: codes.Unavailable, - } -) - -// Records the states during HPACK decoding. Must be reset once the -// decoding of the entire headers are finished. -type decodeState struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. 
- statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - // whether decoding on server side or not - serverSide bool -} - -// isReservedHeader checks whether hdr belongs to HTTP2 headers -// reserved by gRPC protocol. Any other headers are classified as the -// user-specified metadata. -func isReservedHeader(hdr string) bool { - if hdr != "" && hdr[0] == ':' { - return true - } - switch hdr { - case "content-type", - "user-agent", - "grpc-message-type", - "grpc-encoding", - "grpc-message", - "grpc-status", - "grpc-timeout", - "grpc-status-details-bin", - // Intentionally exclude grpc-previous-rpc-attempts and - // grpc-retry-pushback-ms, which are "reserved", but their API - // intentionally works via metadata. - "te": - return true - default: - return false - } -} - -// isWhitelistedHeader checks whether hdr should be propagated into metadata -// visible to users, even though it is classified as "reserved", above. -func isWhitelistedHeader(hdr string) bool { - switch hdr { - case ":authority", "user-agent": - return true - default: - return false - } -} - -// contentSubtype returns the content-subtype for the given content-type. The -// given content-type must be a valid content-type that starts with -// "application/grpc". A content-subtype will follow "application/grpc" after a -// "+" or ";". See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If contentType is not a valid content-type for gRPC, the boolean -// will be false, otherwise true. If content-type == "application/grpc", -// "application/grpc+", or "application/grpc;", the boolean will be true, -// but no content-subtype will be returned. -// -// contentType is assumed to be lowercase already. -func contentSubtype(contentType string) (string, bool) { - if contentType == baseContentType { - return "", true - } - if !strings.HasPrefix(contentType, baseContentType) { - return "", false - } - // guaranteed since != baseContentType and has baseContentType prefix - switch contentType[len(baseContentType)] { - case '+', ';': - // this will return true for "application/grpc+" or "application/grpc;" - // which the previous validContentType function tested to be valid, so we - // just say that no content-subtype is specified in this case - return contentType[len(baseContentType)+1:], true - default: - return "", false - } -} - -// contentSubtype is assumed to be lowercase -func contentType(contentSubtype string) string { - if contentSubtype == "" { - return baseContentType - } - return baseContentType + "+" + contentSubtype -} - -func (d *decodeState) status() *status.Status { - if d.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) - } - return d.statusGen -} - -const binHdrSuffix = "-bin" - -func encodeBinHeader(v []byte) string { - return base64.RawStdEncoding.EncodeToString(v) -} - -func decodeBinHeader(v string) ([]byte, error) { - if len(v)%4 == 0 { - // Input was padded, or padding was not necessary. 
- return base64.StdEncoding.DecodeString(v) - } - return base64.RawStdEncoding.DecodeString(v) -} - -func encodeMetadataHeader(k, v string) string { - if strings.HasSuffix(k, binHdrSuffix) { - return encodeBinHeader(([]byte)(v)) - } - return v -} - -func decodeMetadataHeader(k, v string) (string, error) { - if strings.HasSuffix(k, binHdrSuffix) { - b, err := decodeBinHeader(v) - return string(b), err - } - return v, nil -} - -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return status.Error(codes.Internal, "peer header list size exceeded limit") - } - for _, hf := range frame.Fields { - if err := d.processHeaderField(hf); err != nil { - return err - } - } - - if d.serverSide { - return nil - } - - // If grpc status exists, no need to check further. - if d.rawStatusCode != nil || d.statusGen != nil { - return nil - } - - // If grpc status doesn't exist and http status doesn't exist, - // then it's a malformed header. - if d.httpStatus == nil { - return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") - } - - if *(d.httpStatus) != http.StatusOK { - code, ok := httpStatusConvTab[*(d.httpStatus)] - if !ok { - code = codes.Unknown - } - return status.Error(code, http.StatusText(*(d.httpStatus))) - } - - // gRPC status doesn't exist and http status is OK. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.rawStatusCode = &code - return nil -} - -func (d *decodeState) addMetadata(k, v string) { - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - d.mdata[k] = append(d.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) error { - switch f.Name { - case "content-type": - contentSubtype, validContentType := contentSubtype(f.Value) - if !validContentType { - return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) - } - d.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? 
- d.addMetadata(f.Name, f.Value) - case "grpc-encoding": - d.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - } - d.rawStatusCode = &code - case "grpc-message": - d.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - } - d.statusGen = status.FromProto(s) - case "grpc-timeout": - d.timeoutSet = true - var err error - if d.timeout, err = decodeTimeout(f.Value); err != nil { - return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - } - d.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - } - d.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - } - d.statsTrace = v - d.addMetadata(f.Name, string(v)) - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - return nil - } - d.addMetadata(f.Name, v) - } - return nil -} - -type timeoutUnit uint8 - -const ( - hour timeoutUnit = 'H' - minute timeoutUnit = 'M' - second timeoutUnit = 'S' - millisecond timeoutUnit = 'm' - microsecond timeoutUnit = 'u' - nanosecond timeoutUnit = 'n' -) - -func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { - switch u { - case hour: - return time.Hour, true - case minute: - return time.Minute, true - case second: - return time.Second, true - case millisecond: - return time.Millisecond, true - case microsecond: - return time.Microsecond, true - case nanosecond: - return time.Nanosecond, true - default: - } - return -} - -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if t <= 0 { - return "0n" - } - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. 
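	// [Editor's illustration — not part of the vendored gRPC file removed above.]
	// Worked examples of the grpc-timeout wire encoding produced by the
	// encodeTimeout/decodeTimeout pair defined here, assuming the algorithm as
	// written (at most 8 digits, smallest unit that still fits):
	//
	//   encodeTimeout(90 * time.Millisecond) == "90000000n"  // 9e7 ns fits in 8 digits
	//   encodeTimeout(1 * time.Second)       == "1000000u"   // 1e9 ns does not, so microseconds
	//   encodeTimeout(1 * time.Hour)         == "3600000m"   // falls through to milliseconds
	//   decodeTimeout("1000000u")            == 1*time.Second, nil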
- return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - -func decodeTimeout(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("transport: timeout string is too short: %q", s) - } - if size > 9 { - // Spec allows for 8 digits plus the unit. - return 0, fmt.Errorf("transport: timeout string is too long: %q", s) - } - unit := timeoutUnit(s[size-1]) - d, ok := timeoutUnitToDuration(unit) - if !ok { - return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - const maxHours = math.MaxInt64 / int64(time.Hour) - if d == time.Hour && t > maxHours { - // This timeout would overflow math.MaxInt64; clamp it. - return time.Duration(math.MaxInt64), nil - } - return d * time.Duration(t), nil -} - -const ( - spaceByte = ' ' - tildeByte = '~' - percentByte = '%' -) - -// encodeGrpcMessage is used to encode status code in header field -// "grpc-message". It does percent encoding and also replaces invalid utf-8 -// characters with Unicode replacement character. -// -// It checks to see if each individual byte in msg is an allowable byte, and -// then either percent encoding or passing it through. When percent encoding, -// the byte is converted into hexadecimal notation with a '%' prepended. -func encodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if !(c >= spaceByte && c <= tildeByte && c != percentByte) { - return encodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer - for len(msg) > 0 { - r, size := utf8.DecodeRuneInString(msg) - for _, b := range []byte(string(r)) { - if size > 1 { - // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) - continue - } - - // The for loop is necessary even if size == 1. r could be - // utf8.RuneError. - // - // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". - if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) - } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) - } - } - msg = msg[size:] - } - return buf.String() -} - -// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. -func decodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - if msg[i] == percentByte && i+2 < lenMsg { - return decodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c == percentByte && i+2 < lenMsg { - parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) - if err != nil { - buf.WriteByte(c) - } else { - buf.WriteByte(byte(parsed)) - i += 2 - } - } else { - buf.WriteByte(c) - } - } - return buf.String() -} - -type bufWriter struct { - buf []byte - offset int - batchSize int - conn net.Conn - err error - - onFlush func() -} - -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), - batchSize: batchSize, - conn: conn, - } -} - -func (w *bufWriter) Write(b []byte) (n int, err error) { - if w.err != nil { - return 0, w.err - } - if w.batchSize == 0 { // Buffer has been disabled. 
- return w.conn.Write(b) - } - for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.Flush() - } - } - return n, err -} - -func (w *bufWriter) Flush() error { - if w.err != nil { - return w.err - } - if w.offset == 0 { - return nil - } - if w.onFlush != nil { - w.onFlush() - } - _, w.err = w.conn.Write(w.buf[:w.offset]) - w.offset = 0 - return w.err -} - -type framer struct { - writer *bufWriter - fr *http2.Framer -} - -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { - if writeBufferSize < 0 { - writeBufferSize = 0 - } - var r io.Reader = conn - if readBufferSize > 0 { - r = bufio.NewReaderSize(r, readBufferSize) - } - w := newBufWriter(conn, writeBufferSize) - f := &framer{ - writer: w, - fr: http2.NewFramer(w, r), - } - // Opt-in to Frame reuse API on framer to reduce garbage. - // Frames aren't safe to read from after a subsequent call to ReadFrame. - f.fr.SetReuseFrames() - f.fr.MaxHeaderListSize = maxHeaderListSize - f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) - return f -} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go deleted file mode 100644 index 879df80c4d..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/log.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// This file contains wrappers for grpclog functions. -// The transport package only logs to verbose level 2 by default. - -package transport - -import "google.golang.org/grpc/grpclog" - -const logLevel = 2 - -func infof(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Infof(format, args...) - } -} - -func warningf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Warningf(format, args...) - } -} - -func errorf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Errorf(format, args...) - } -} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go deleted file mode 100644 index 2580aa7d3b..0000000000 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ /dev/null @@ -1,758 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package transport defines and implements message oriented communication -// channel to complete various transactions (e.g., an RPC). It is meant for -// grpc-internal usage and is not intended to be imported directly by users. -package transport - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "sync" - "sync/atomic" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/tap" -) - -// recvMsg represents the received msg from the transport. All transport -// protocol specific info has been removed. -type recvMsg struct { - data []byte - // nil: received some data - // io.EOF: stream is completed. data is nil. - // other non-nil error: transport failure. data is nil. - err error -} - -// recvBuffer is an unbounded channel of recvMsg structs. -// Note recvBuffer differs from controlBuffer only in that recvBuffer -// holds a channel of only recvMsg structs instead of objects implementing "item" interface. -// recvBuffer is written to much more often than -// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" -type recvBuffer struct { - c chan recvMsg - mu sync.Mutex - backlog []recvMsg - err error -} - -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b -} - -func (b *recvBuffer) put(r recvMsg) { - b.mu.Lock() - if b.err != nil { - b.mu.Unlock() - // An error had occurred earlier, don't accept more - // data or errors. - return - } - b.err = r.err - if len(b.backlog) == 0 { - select { - case b.c <- r: - b.mu.Unlock() - return - default: - } - } - b.backlog = append(b.backlog, r) - b.mu.Unlock() -} - -func (b *recvBuffer) load() { - b.mu.Lock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = recvMsg{} - b.backlog = b.backlog[1:] - default: - } - } - b.mu.Unlock() -} - -// get returns the channel that receives a recvMsg in the buffer. -// -// Upon receipt of a recvMsg, the caller should call load to send another -// recvMsg onto the channel if there is any. -func (b *recvBuffer) get() <-chan recvMsg { - return b.c -} - -// recvBufferReader implements io.Reader interface to read the data from -// recvBuffer. -type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last []byte // Stores the remaining data in the previous calls. - err error -} - -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { - if r.err != nil { - return 0, r.err - } - if r.last != nil && len(r.last) > 0 { - // Read remaining data left in last call. 
- copied := copy(p, r.last) - r.last = r.last[copied:] - return copied, nil - } - if r.closeStream != nil { - n, r.err = r.readClient(p) - } else { - n, r.err = r.read(p) - } - return n, r.err -} - -func (r *recvBufferReader) read(p []byte) (n int, err error) { - select { - case <-r.ctxDone: - return 0, ContextErr(r.ctx.Err()) - case m := <-r.recv.get(): - return r.readAdditional(m, p) - } -} - -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { - // If the context is canceled, then closes the stream with nil metadata. - // closeStream writes its error parameter to r.recv as a recvMsg. - // r.readAdditional acts on that message and returns the necessary error. - select { - case <-r.ctxDone: - r.closeStream(ContextErr(r.ctx.Err())) - m := <-r.recv.get() - return r.readAdditional(m, p) - case m := <-r.recv.get(): - return r.readAdditional(m, p) - } -} - -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { - r.recv.load() - if m.err != nil { - return 0, m.err - } - copied := copy(p, m.data) - r.last = m.data[copied:] - return copied, nil -} - -type streamState uint32 - -const ( - streamActive streamState = iota - streamWriteDone // EndStream sent - streamReadDone // EndStream received - streamDone // the entire stream is finished. -) - -// Stream represents an RPC in the transport layer. -type Stream struct { - id uint32 - st ServerTransport // nil for client side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream - recvCompress string - sendCompress string - buf *recvBuffer - trReader io.Reader - fc *inFlow - wq *writeQuota - - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. - requestRead func(int) - - headerChan chan struct{} // closed to indicate the end of header metadata. - headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - - state streamState - - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - - // contentSubtype is the content-subtype for requests. - // this must be lowercase or the behavior is undefined. - contentSubtype string -} - -// isHeaderSent is only valid on the server-side. 
-func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} - -// updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 -} - -func (s *Stream) swapState(st streamState) streamState { - return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) -} - -func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { - return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) -} - -func (s *Stream) getState() streamState { - return streamState(atomic.LoadUint32((*uint32)(&s.state))) -} - -func (s *Stream) waitOnHeader() error { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. - return nil - } - select { - case <-s.ctx.Done(): - return ContextErr(s.ctx.Err()) - case <-s.headerChan: - return nil - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - if err := s.waitOnHeader(); err != nil { - return "" - } - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil && s.header != nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - err := s.waitOnHeader() - // Even if the stream is closed, header is returned if available. - select { - case <-s.headerChan: - if s.header == nil { - return nil, nil - } - return s.header.Copy(), nil - default: - } - return nil, err -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. If a context error happens -// first, returns it as a status error. Client-side only. -func (s *Stream) TrailersOnly() (bool, error) { - err := s.waitOnHeader() - if err != nil { - return false, err - } - // if !headerDone, some other connection error occurred. - return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil -} - -// Trailer returns the cached trailer metedata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. -// It can be safely read only after stream has ended that is either read -// or write have returned io.EOF. -func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. 
See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype -} - -// Context returns the context of the stream. -func (s *Stream) Context() context.Context { - return s.ctx -} - -// Method returns the method for the stream. -func (s *Stream) Method() string { - return s.method -} - -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - -func (s *Stream) write(m recvMsg) { - s.buf.put(m) -} - -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { - // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er - } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) -} - -// tranportReader reads all the data available for this Stream from the transport and -// passes them into the decoder, which converts them into a gRPC message stream. -// The error is io.EOF when the stream is done or another non-nil error if -// the stream broke. -type transportReader struct { - reader io.Reader - // The handler to control the window update procedure for both this - // particular stream and the associated transport. - windowHandler func(int) - er error -} - -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) - if err != nil { - t.er = err - return - } - t.windowHandler(n) - return -} - -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - -// GoString is implemented by Stream so context.String() won't -// race when printing %#v. 
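// [Editor's illustration — not part of the vendored source.] The server-side
// Stream methods above (SetHeader, SendHeader, SetTrailer) are what the public
// helpers in package google.golang.org/grpc ultimately call; application code
// normally reaches them through the handler context rather than through
// transport.Stream directly, roughly like:
//
//	func (s *server) Get(ctx context.Context, req *pb.Req) (*pb.Resp, error) {
//		grpc.SetHeader(ctx, metadata.Pairs("x-cache", "hit"))   // merged, sent with the first response message
//		grpc.SetTrailer(ctx, metadata.Pairs("x-took-ms", "12")) // sent together with the final status
//		return &pb.Resp{}, nil
//	}
//
// pb.Req, pb.Resp, and the handler name are placeholders for generated types,
// not anything defined in this patch.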
-func (s *Stream) GoString() string { - return fmt.Sprintf("", s, s.method) -} - -// state of transport -type transportState int - -const ( - reachable transportState = iota - closing - draining -) - -// ServerConfig consists of all the configurations to establish a server transport. -type ServerConfig struct { - MaxStreams uint32 - AuthInfo credentials.AuthInfo - InTapHandle tap.ServerInHandle - StatsHandler stats.Handler - KeepaliveParams keepalive.ServerParameters - KeepalivePolicy keepalive.EnforcementPolicy - InitialWindowSize int32 - InitialConnWindowSize int32 - WriteBufferSize int - ReadBufferSize int - ChannelzParentID int64 - MaxHeaderListSize *uint32 -} - -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - -// ConnectOptions covers all relevant options for communicating with the server. -type ConnectOptions struct { - // UserAgent is the application user agent. - UserAgent string - // Dialer specifies how to dial a network address. - Dialer func(context.Context, string) (net.Conn, error) - // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. - FailOnNonTempDialError bool - // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. - PerRPCCredentials []credentials.PerRPCCredentials - // TransportCredentials stores the Authenticator required to setup a client - // connection. Only one of TransportCredentials and CredsBundle is non-nil. - TransportCredentials credentials.TransportCredentials - // CredsBundle is the credentials bundle to be used. Only one of - // TransportCredentials and CredsBundle is non-nil. - CredsBundle credentials.Bundle - // KeepaliveParams stores the keepalive parameters. - KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler - // InitialWindowSize sets the initial window size for a stream. - InitialWindowSize int32 - // InitialConnWindowSize sets the initial window size for a connection. - InitialConnWindowSize int32 - // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. - WriteBufferSize int - // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. - ReadBufferSize int - // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 - // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. - MaxHeaderListSize *uint32 -} - -// TargetInfo contains the information of the target such as network address and metadata. -type TargetInfo struct { - Addr string - Metadata interface{} - Authority string -} - -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) -} - -// Options provides additional hints and information for message -// transmission. -type Options struct { - // Last indicates whether this write is the last piece for - // this stream. 
- Last bool -} - -// CallHdr carries the information of a particular RPC. -type CallHdr struct { - // Host specifies the peer's host. - Host string - - // Method specifies the operation to perform. - Method string - - // SendCompress specifies the compression algorithm applied on - // outbound message. - SendCompress string - - // Creds specifies credentials.PerRPCCredentials for a call. - Creds credentials.PerRPCCredentials - - // ContentSubtype specifies the content-subtype for a request. For example, a - // content-subtype of "proto" will result in a content-type of - // "application/grpc+proto". The value of ContentSubtype must be all - // lowercase, otherwise the behavior is undefined. See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ContentSubtype string - - PreviousAttempts int // value of grpc-previous-rpc-attempts header to set -} - -// ClientTransport is the common interface for all gRPC client-side transport -// implementations. -type ClientTransport interface { - // Close tears down this transport. Once it returns, the transport - // should not be accessed any more. The caller must make sure this - // is called only once. - Close() error - - // GracefulClose starts to tear down the transport. It stops accepting - // new RPCs and wait the completion of the pending RPCs. - GracefulClose() error - - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error - - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) - - // Error returns a channel that is closed when some I/O error - // happens. Typically the caller should have a goroutine to monitor - // this in order to take action (e.g., close the current transport - // and create a new one) in error case. It should not return nil - // once the transport is initiated. - Error() <-chan struct{} - - // GoAway returns a channel that is closed when ClientTransport - // receives the draining signal from the server (e.g., GOAWAY frame in - // HTTP/2). - GoAway() <-chan struct{} - - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() -} - -// ServerTransport is the common interface for all gRPC server-side transport -// implementations. -// -// Methods may be called concurrently from multiple goroutines, but -// Write methods for a given Stream will be called serially. -type ServerTransport interface { - // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. 
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error - - // Close tears down the transport. Once it is called, the transport - // should not be accessed any more. All the pending streams and their - // handlers will be terminated asynchronously. - Close() error - - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr - - // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() -} - -// connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { - return ConnectionError{ - Desc: fmt.Sprintf(format, a...), - temp: temp, - err: e, - } -} - -// ConnectionError is an error that results in the termination of the -// entire connection and the retry of all the active streams. -type ConnectionError struct { - Desc string - temp bool - err error -} - -func (e ConnectionError) Error() string { - return fmt.Sprintf("connection error: desc = %q", e.Desc) -} - -// Temporary indicates if this connection error is temporary or fatal. -func (e ConnectionError) Temporary() bool { - return e.temp -} - -// Origin returns the original error of this connection error. -func (e ConnectionError) Origin() error { - // Never return nil error here. - // If the original error is nil, return itself. - if e.err == nil { - return e - } - return e.err -} - -var ( - // ErrConnClosing indicates that the transport is closing. - ErrConnClosing = connectionErrorf(true, nil, "transport is closing") - // errStreamDrain indicates that the stream is rejected because the - // connection is draining. This could be caused by goaway or balancer - // removing the address. - errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application - // layer of an error. - errStreamDone = errors.New("the stream is done") - // StatusGoAway indicates that the server sent a GOAWAY that included this - // stream's ID in unprocessed RPCs. - statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") -) - -// GoAwayReason contains the reason for the GoAway frame received. -type GoAwayReason uint8 - -const ( - // GoAwayInvalid indicates that no GoAway frame is received. - GoAwayInvalid GoAwayReason = 0 - // GoAwayNoReason is the default value when GoAway frame is received. - GoAwayNoReason GoAwayReason = 1 - // GoAwayTooManyPings indicates that a GoAway frame with - // ErrCodeEnhanceYourCalm was received and that the debug data said - // "too_many_pings". - GoAwayTooManyPings GoAwayReason = 2 -) - -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. 
-type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - -// ContextErr converts the error from context package into a status error. -func ContextErr(err error) error { - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } - return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) -} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go deleted file mode 100644 index 899e72d7a4..0000000000 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package keepalive defines configurable parameters for point-to-point -// healthcheck. -package keepalive - -import ( - "time" -) - -// ClientParameters is used to set keepalive parameters on the client-side. -// These configure how the client will actively probe to notice when a -// connection is broken and send pings so intermediaries will be aware of the -// liveness of the connection. Make sure these parameters are set in -// coordination with the keepalive policy on the server, as incompatible -// settings can result in closing of connection. -type ClientParameters struct { - // After a duration of this time if the client doesn't see any activity it - // pings the server to see if the transport is still alive. - Time time.Duration // The current default value is infinity. - // After having pinged for keepalive check, the client waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. - Timeout time.Duration // The current default value is 20 seconds. - // If true, client sends keepalive pings even with no active RPCs. If false, - // when there are no active RPCs, Time and Timeout will be ignored and no - // keepalive pings will be sent. - PermitWithoutStream bool // false by default. -} - -// ServerParameters is used to set keepalive and max-age parameters on the -// server-side. 
-type ServerParameters struct { - // MaxConnectionIdle is a duration for the amount of time after which an - // idle connection would be closed by sending a GoAway. Idleness duration is - // defined since the most recent time the number of outstanding RPCs became - // zero or the connection establishment. - MaxConnectionIdle time.Duration // The current default value is infinity. - // MaxConnectionAge is a duration for the maximum amount of time a - // connection may exist before it will be closed by sending a GoAway. A - // random jitter of +/-10% will be added to MaxConnectionAge to spread out - // connection storms. - MaxConnectionAge time.Duration // The current default value is infinity. - // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after - // which the connection will be forcibly closed. - MaxConnectionAgeGrace time.Duration // The current default value is infinity. - // After a duration of this time if the server doesn't see any activity it - // pings the client to see if the transport is still alive. - Time time.Duration // The current default value is 2 hours. - // After having pinged for keepalive check, the server waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. - Timeout time.Duration // The current default value is 20 seconds. -} - -// EnforcementPolicy is used to set keepalive enforcement policy on the -// server-side. Server will close connection with a client that violates this -// policy. -type EnforcementPolicy struct { - // MinTime is the minimum amount of time a client should wait before sending - // a keepalive ping. - MinTime time.Duration // The current default value is 5 minutes. - // If true, server allows keepalive pings even when there are no active - // streams(RPCs). If false, and client sends ping when there are no active - // streams, server will send GOAWAY and close the connection. - PermitWithoutStream bool // false by default. -} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go deleted file mode 100644 index cf6d1b9478..0000000000 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package metadata define the structure of the metadata supported by gRPC library. -// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md -// for more information about custom-metadata. -package metadata // import "google.golang.org/grpc/metadata" - -import ( - "context" - "fmt" - "strings" -) - -// DecodeKeyValue returns k, v, nil. -// -// Deprecated: use k and v directly instead. -func DecodeKeyValue(k, v string) (string, string, error) { - return k, v, nil -} - -// MD is a mapping from metadata keys to values. Users should use the following -// two convenience functions New and Pairs to generate MD. 
-type MD map[string][]string - -// New creates an MD from a given key-value map. -// -// Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. -// Uppercase letters are automatically converted to lowercase. -// -// Keys beginning with "grpc-" are reserved for grpc-internal use only and may -// result in errors if set in metadata. -func New(m map[string]string) MD { - md := MD{} - for k, val := range m { - key := strings.ToLower(k) - md[key] = append(md[key], val) - } - return md -} - -// Pairs returns an MD formed by the mapping of key, value ... -// Pairs panics if len(kv) is odd. -// -// Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. -// Uppercase letters are automatically converted to lowercase. -// -// Keys beginning with "grpc-" are reserved for grpc-internal use only and may -// result in errors if set in metadata. -func Pairs(kv ...string) MD { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) - } - md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) - } - return md -} - -// Len returns the number of items in md. -func (md MD) Len() int { - return len(md) -} - -// Copy returns a copy of md. -func (md MD) Copy() MD { - return Join(md) -} - -// Get obtains the values for a given key. -func (md MD) Get(k string) []string { - k = strings.ToLower(k) - return md[k] -} - -// Set sets the value of a given key with a slice of values. -func (md MD) Set(k string, vals ...string) { - if len(vals) == 0 { - return - } - k = strings.ToLower(k) - md[k] = vals -} - -// Append adds the values to key k, not overwriting what was already stored at that key. -func (md MD) Append(k string, vals ...string) { - if len(vals) == 0 { - return - } - k = strings.ToLower(k) - md[k] = append(md[k], vals...) -} - -// Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. -func Join(mds ...MD) MD { - out := MD{} - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return out -} - -type mdIncomingKey struct{} -type mdOutgoingKey struct{} - -// NewIncomingContext creates a new context with incoming md attached. -func NewIncomingContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdIncomingKey{}, md) -} - -// NewOutgoingContext creates a new context with outgoing md attached. If used -// in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. -func NewOutgoingContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) -} - -// AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. 
-func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) - } - md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) - added := make([][]string, len(md.added)+1) - copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) - return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) -} - -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return -} - -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. -// -// This is intended for gRPC-internal use ONLY. -func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { - raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) - if !ok { - return nil, nil, false - } - - return raw.md, raw.added, true -} - -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromOutgoingContext(ctx context.Context) (MD, bool) { - raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) - if !ok { - return nil, false - } - - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) - } - return Join(mds...), ok -} - -type rawMD struct { - md MD - added [][]string -} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index fd8cd3b5a4..0000000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package naming - -import ( - "context" - "errors" - "fmt" - "net" - "strconv" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. 
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. -func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets -// are strippd when setting the host. -// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. 
-func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the address resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unnecessary. Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exists until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. 
- ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. -func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index 8cc39e9375..0000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be suject to change. -// -// Deprecated: please use package resolver. -package naming - -// Operation defines the corresponding operations for a name resolution change. -// -// Deprecated: please use package resolver. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -// -// Deprecated: please use package resolver. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -// -// Deprecated: please use package resolver. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -// -// Deprecated: please use package resolver. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go deleted file mode 100644 index e01d219ffb..0000000000 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package peer defines various peer information associated with RPCs and -// corresponding utils. -package peer - -import ( - "context" - "net" - - "google.golang.org/grpc/credentials" -) - -// Peer contains the information of the peer for an RPC, such as the address -// and authentication information. -type Peer struct { - // Addr is the peer address. - Addr net.Addr - // AuthInfo is the authentication information of the transport. - // It is nil if there is no transport security being used. - AuthInfo credentials.AuthInfo -} - -type peerKey struct{} - -// NewContext creates a new context with peer information attached. -func NewContext(ctx context.Context, p *Peer) context.Context { - return context.WithValue(ctx, peerKey{}, p) -} - -// FromContext returns the peer information in ctx if it exists. 
-func FromContext(ctx context.Context) (p *Peer, ok bool) { - p, ok = ctx.Value(peerKey{}).(*Peer) - return -} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go deleted file mode 100644 index 14f915d676..0000000000 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "io" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/status" -) - -// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick -// actions and unblock when there's a picker update. -type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker - - // The latest connection happened. - connErrMu sync.Mutex - connErr error -} - -func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} - return bp -} - -func (bp *pickerWrapper) updateConnectionError(err error) { - bp.connErrMu.Lock() - bp.connErr = err - bp.connErrMu.Unlock() -} - -func (bp *pickerWrapper) connectionError() error { - bp.connErrMu.Lock() - err := bp.connErr - bp.connErrMu.Unlock() - return err -} - -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (bp *pickerWrapper) updatePicker(p balancer.Picker) { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() - return - } - bp.picker = p - // bp.blockingCh should never be nil. - close(bp.blockingCh) - bp.blockingCh = make(chan struct{}) - bp.mu.Unlock() -} - -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() - ac.incrCallsStarted() - return func(b balancer.DoneInfo) { - if b.Err != nil && b.Err != io.EOF { - ac.incrCallsFailed() - } else { - ac.incrCallsSucceeded() - } - if done != nil { - done(b) - } - } -} - -// pick returns the transport that will be used for the RPC. -// It may block in the following cases: -// - there's no picker -// - the current picker returns ErrNoSubConnAvailable -// - the current picker returns other errors and failfast is false. -// - the subConn returned by the current picker is not READY -// When one of these situations happens, pick blocks until the picker gets updated. 
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { - var ( - p balancer.Picker - ch chan struct{} - ) - - for { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() - return nil, nil, ErrClientConnClosing - } - - if bp.picker == nil { - ch = bp.blockingCh - } - if ch == bp.blockingCh { - // This could happen when either: - // - bp.picker is nil (the previous if condition), or - // - has called pick on the current picker. - bp.mu.Unlock() - select { - case <-ctx.Done(): - return nil, nil, ctx.Err() - case <-ch: - } - continue - } - - ch = bp.blockingCh - p = bp.picker - bp.mu.Unlock() - - subConn, done, err := p.Pick(ctx, opts) - - if err != nil { - switch err { - case balancer.ErrNoSubConnAvailable: - continue - case balancer.ErrTransientFailure: - if !failfast { - continue - } - return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) - default: - // err is some other error. - return nil, nil, toRPCErr(err) - } - } - - acw, ok := subConn.(*acBalancerWrapper) - if !ok { - grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") - continue - } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { - if channelz.IsOn() { - return t, doneChannelzWrapper(acw, done), nil - } - return t, done, nil - } - grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") - // If ok == false, ac.state is not READY. - // A valid picker always returns READY subConn. This means the state of ac - // just changed, and picker will be updated shortly. - // continue back to the beginning of the for loop to repick. - } -} - -func (bp *pickerWrapper) close() { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.done { - return - } - bp.done = true - close(bp.blockingCh) -} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go deleted file mode 100644 index d1e38aad77..0000000000 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -// PickFirstBalancerName is the name of the pick_first balancer. 
-const PickFirstBalancerName = "pick_first" - -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} -} - -type pickfirstBuilder struct{} - -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} -} - -func (*pickfirstBuilder) Name() string { - return PickFirstBalancerName -} - -type pickfirstBalancer struct { - cc balancer.ClientConn - sc balancer.SubConn -} - -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) - return - } - if b.sc == nil { - b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) - if err != nil { - //TODO(yuxuanli): why not change the cc state to Idle? - grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - return - } - b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) - b.sc.Connect() - } else { - b.sc.UpdateAddresses(addrs) - b.sc.Connect() - } -} - -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) - if b.sc != sc { - grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") - return - } - if s == connectivity.Shutdown { - b.sc = nil - return - } - - switch s { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateBalancerState(s, &picker{sc: sc}) - case connectivity.Connecting: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) - case connectivity.TransientFailure: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) - } -} - -func (b *pickfirstBalancer) Close() { -} - -type picker struct { - err error - sc balancer.SubConn -} - -func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - if p.err != nil { - return nil, nil, p.err - } - return p.sc, nil, nil -} - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go deleted file mode 100644 index f8f69bfb70..0000000000 --- a/vendor/google.golang.org/grpc/proxy.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "bufio" - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/http/httputil" - "net/url" -) - -const proxyAuthHeaderKey = "Proxy-Authorization" - -var ( - // errDisabled indicates that proxy is disabled for the address. - errDisabled = errors.New("proxy is disabled for the address") - // The following variable will be overwritten in the tests. 
- httpProxyFromEnvironment = http.ProxyFromEnvironment -) - -func mapAddress(ctx context.Context, address string) (*url.URL, error) { - req := &http.Request{ - URL: &url.URL{ - Scheme: "https", - Host: address, - }, - } - url, err := httpProxyFromEnvironment(req) - if err != nil { - return nil, err - } - if url == nil { - return nil, errDisabled - } - return url, nil -} - -// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. -// It's possible that this reader reads more than what's need for the response and stores -// those bytes in the buffer. -// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the -// bytes in the buffer. -type bufConn struct { - net.Conn - r io.Reader -} - -func (c *bufConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -func basicAuth(username, password string) string { - auth := username + ":" + password - return base64.StdEncoding.EncodeToString([]byte(auth)) -} - -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { - defer func() { - if err != nil { - conn.Close() - } - }() - - req := &http.Request{ - Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, - Header: map[string][]string{"User-Agent": {grpcUA}}, - } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() - req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) - } - - if err := sendHTTPRequest(ctx, req, conn); err != nil { - return nil, fmt.Errorf("failed to write the HTTP request: %v", err) - } - - r := bufio.NewReader(conn) - resp, err := http.ReadResponse(r, req) - if err != nil { - return nil, fmt.Errorf("reading server HTTP response: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - dump, err := httputil.DumpResponse(resp, true) - if err != nil { - return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) - } - return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) - } - - return &bufConn{Conn: conn, r: r}, nil -} - -// newProxyDialer returns a dialer that connects to proxy first if necessary. -// The returned dialer checks if a proxy is necessary, dial to the proxy with the -// provided dialer, does HTTP CONNECT handshake and returns the connection. -func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, addr string) (conn net.Conn, err error) { - var newAddr string - proxyURL, err := mapAddress(ctx, addr) - if err != nil { - if err != errDisabled { - return nil, err - } - newAddr = addr - } else { - newAddr = proxyURL.Host - } - - conn, err = dialer(ctx, newAddr) - if err != nil { - return - } - if proxyURL != nil { - // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) - } - return - } -} - -func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { - req = req.WithContext(ctx) - if err := req.Write(conn); err != nil { - return fmt.Errorf("failed to write the HTTP request: %v", err) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go deleted file mode 100644 index f33189fede..0000000000 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ /dev/null @@ -1,436 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package dns implements a dns resolver to be installed as the default resolver -// in grpc. -package dns - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net" - "os" - "strconv" - "strings" - "sync" - "time" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/resolver" -) - -func init() { - resolver.Register(NewBuilder()) -} - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 - defaultDNSSvrPort = "53" - golang = "GO" - // In DNS, service config is encoded in a TXT record via the mechanism - // described in RFC-1464 using the attribute name grpc_config. - txtAttribute = "grpc_config=" -) - -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver -) - -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { - var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) - } -} - -var customAuthorityResolver = func(authority string) (netResolver, error) { - host, port, err := parseTarget(authority, defaultDNSSvrPort) - if err != nil { - return nil, err - } - - authorityWithPort := net.JoinHostPort(host, port) - - return &net.Resolver{ - PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), - }, nil -} - -// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. -func NewBuilder() resolver.Builder { - return &dnsBuilder{minFreq: defaultFreq} -} - -type dnsBuilder struct { - // minimum frequency of polling the DNS server. - minFreq time.Duration -} - -// Build creates and starts a DNS resolver that watches the name resolution of the target. -func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) - if err != nil { - return nil, err - } - - // IP address. - if net.ParseIP(host) != nil { - host, _ = formatIP(host) - addr := []resolver.Address{{Addr: host + ":" + port}} - i := &ipResolver{ - cc: cc, - ip: addr, - rn: make(chan struct{}, 1), - q: make(chan struct{}), - } - cc.NewAddress(addr) - go i.watcher() - return i, nil - } - - // DNS address (non-IP). 
- ctx, cancel := context.WithCancel(context.Background()) - d := &dnsResolver{ - freq: b.minFreq, - backoff: backoff.Exponential{MaxDelay: b.minFreq}, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - cc: cc, - t: time.NewTimer(0), - rn: make(chan struct{}, 1), - disableServiceConfig: opts.DisableServiceConfig, - } - - if target.Authority == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = customAuthorityResolver(target.Authority) - if err != nil { - return nil, err - } - } - - d.wg.Add(1) - go d.watcher() - return d, nil -} - -// Scheme returns the naming scheme of this resolver builder, which is "dns". -func (b *dnsBuilder) Scheme() string { - return "dns" -} - -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - -// ipResolver watches for the name resolution update for an IP address. -type ipResolver struct { - cc resolver.ClientConn - ip []resolver.Address - // rn channel is used by ResolveNow() to force an immediate resolution of the target. - rn chan struct{} - q chan struct{} -} - -// ResolveNow resend the address it stores, no resolution is needed. -func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { - select { - case i.rn <- struct{}{}: - default: - } -} - -// Close closes the ipResolver. -func (i *ipResolver) Close() { - close(i.q) -} - -func (i *ipResolver) watcher() { - for { - select { - case <-i.rn: - i.cc.NewAddress(i.ip) - case <-i.q: - return - } - } -} - -// dnsResolver watches for the name resolution update for a non-IP target. -type dnsResolver struct { - freq time.Duration - backoff backoff.Exponential - retryCount int - host string - port string - resolver netResolver - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. - rn chan struct{} - t *time.Timer - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). - wg sync.WaitGroup - disableServiceConfig bool -} - -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. -func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { - select { - case d.rn <- struct{}{}: - default: - } -} - -// Close closes the dnsResolver. -func (d *dnsResolver) Close() { - d.cancel() - d.wg.Wait() - d.t.Stop() -} - -func (d *dnsResolver) watcher() { - defer d.wg.Done() - for { - select { - case <-d.ctx.Done(): - return - case <-d.t.C: - case <-d.rn: - } - result, sc := d.lookup() - // Next lookup should happen within an interval defined by d.freq. It may be - // more often due to exponential retry on empty address list. 
- if len(result) == 0 { - d.retryCount++ - d.t.Reset(d.backoff.Backoff(d.retryCount)) - } else { - d.retryCount = 0 - d.t.Reset(d.freq) - } - d.cc.NewServiceConfig(sc) - d.cc.NewAddress(result) - } -} - -func (d *dnsResolver) lookupSRV() []resolver.Address { - var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) - if err != nil { - grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) - } - } - return newAddrs -} - -func (d *dnsResolver) lookupTXT() string { - ss, err := d.resolver.LookupTXT(d.ctx, d.host) - if err != nil { - grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) - return "" - } - var res string - for _, s := range ss { - res += s - } - - // TXT record must have "grpc_config=" attribute in order to be used as service config. - if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) - return "" - } - return strings.TrimPrefix(res, txtAttribute) -} - -func (d *dnsResolver) lookupHost() []resolver.Address { - var newAddrs []resolver.Address - addrs, err := d.resolver.LookupHost(d.ctx, d.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + d.port - newAddrs = append(newAddrs, resolver.Address{Addr: addr}) - } - return newAddrs -} - -func (d *dnsResolver) lookup() ([]resolver.Address, string) { - newAddrs := d.lookupSRV() - // Support fallback to non-balancer address. - newAddrs = append(newAddrs, d.lookupHost()...) - if d.disableServiceConfig { - return newAddrs, "" - } - sc := d.lookupTXT() - return newAddrs, canaryingSC(sc) -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets -// are strippd when setting the host. 
-// examples: -// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" -// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" -func parseTarget(target, defaultPort string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err = net.SplitHostPort(target); err == nil { - if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. - return "", "", errEndsWithColon - } - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - return host, port, nil - } - if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) -} - -type rawChoice struct { - ClientLanguage *[]string `json:"clientLanguage,omitempty"` - Percentage *int `json:"percentage,omitempty"` - ClientHostName *[]string `json:"clientHostName,omitempty"` - ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` -} - -func containsString(a *[]string, b string) bool { - if a == nil { - return true - } - for _, c := range *a { - if c == b { - return true - } - } - return false -} - -func chosenByPercentage(a *int) bool { - if a == nil { - return true - } - return grpcrand.Intn(100)+1 <= *a -} - -func canaryingSC(js string) string { - if js == "" { - return "" - } - var rcs []rawChoice - err := json.Unmarshal([]byte(js), &rcs) - if err != nil { - grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) - return "" - } - cliHostname, err := os.Hostname() - if err != nil { - grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) - return "" - } - var sc string - for _, c := range rcs { - if !containsString(c.ClientLanguage, golang) || - !chosenByPercentage(c.Percentage) || - !containsString(c.ClientHostName, cliHostname) || - c.ServiceConfig == nil { - continue - } - sc = string(*c.ServiceConfig) - break - } - return sc -} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go deleted file mode 100644 index b76010d74d..0000000000 --- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package passthrough implements a pass-through resolver. 
It sends the target -// name without scheme back to gRPC as resolved address. -package passthrough - -import "google.golang.org/grpc/resolver" - -const scheme = "passthrough" - -type passthroughBuilder struct{} - -func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { - r := &passthroughResolver{ - target: target, - cc: cc, - } - r.start() - return r, nil -} - -func (*passthroughBuilder) Scheme() string { - return scheme -} - -type passthroughResolver struct { - target resolver.Target - cc resolver.ClientConn -} - -func (r *passthroughResolver) start() { - r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) -} - -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} - -func (*passthroughResolver) Close() {} - -func init() { - resolver.Register(&passthroughBuilder{}) -} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go deleted file mode 100644 index 145cf477ed..0000000000 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ /dev/null @@ -1,158 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package resolver defines APIs for name resolution in gRPC. -// All APIs in this package are experimental. -package resolver - -var ( - // m is a map from scheme to resolver builder. - m = make(map[string]Builder) - // defaultScheme is the default scheme to use. - defaultScheme = "passthrough" -) - -// TODO(bar) install dns resolver in init(){}. - -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Resolvers are -// registered with the same name, the one registered last will take effect. -func Register(b Builder) { - m[b.Scheme()] = b -} - -// Get returns the resolver builder registered with the given scheme. -// -// If no builder is register with the scheme, nil will be returned. -func Get(scheme string) Builder { - if b, ok := m[scheme]; ok { - return b - } - return nil -} - -// SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. The scheme set last overrides -// previously set values. -func SetDefaultScheme(scheme string) { - defaultScheme = scheme -} - -// GetDefaultScheme gets the default scheme that will be used. -func GetDefaultScheme() string { - return defaultScheme -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. 
- GRPCLB -) - -// Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Type is the type of this address. - Type AddressType - // ServerName is the name of this address. - // - // e.g. if Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - ServerName string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BuildOption includes additional information for the builder to create -// the resolver. -type BuildOption struct { - // DisableServiceConfig indicates whether resolver should fetch service config data. - DisableServiceConfig bool -} - -// ClientConn contains the callbacks for resolver to notify any updates -// to the gRPC ClientConn. -// -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. -type ClientConn interface { - // NewAddress is called by resolver to notify ClientConn a new list - // of resolved addresses. - // The address list should be the complete list of resolved addresses. - NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - NewServiceConfig(serviceConfig string) -} - -// Target represents a target for gRPC, as specified in: -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -type Target struct { - Scheme string - Authority string - Endpoint string -} - -// Builder creates a resolver that will be used to watch name resolution updates. -type Builder interface { - // Build creates a new resolver for the given target. - // - // gRPC dial calls Build synchronously, and fails if the returned error is - // not nil. - Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. - Scheme() string -} - -// ResolveNowOption includes additional information for ResolveNow. -type ResolveNowOption struct{} - -// Resolver watches for the updates on the specified target. -// Updates include address updates and service config updates. -type Resolver interface { - // ResolveNow will be called by gRPC to try to resolve the target name - // again. It's just a hint, resolver can ignore this if it's not necessary. - // - // It could be called multiple times concurrently. - ResolveNow(ResolveNowOption) - // Close closes the resolver. - Close() -} - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index 50991eafb9..0000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "strings" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/resolver" -) - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. -type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done chan struct{} - lastAddressesCount int -} - -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} - -// newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme and builds the resolver. The monitoring goroutine -// for it is not started yet and can be created by calling start(). -// -// If withResolverBuilder dial option is set, the specified resolver will be -// used instead. -func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - rb := cc.dopts.resolverBuilder - if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) - } - - ccr := &ccResolverWrapper{ - cc: cc, - addrCh: make(chan []resolver.Address, 1), - scCh: make(chan string, 1), - done: make(chan struct{}), - } - - var err error - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) - if err != nil { - return nil, err - } - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.resolver.Close() - close(ccr.done) -} - -// NewAddress is called by the resolver implemenetion to send addresses to gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - select { - case <-ccr.done: - return - default: - } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(addrs) - } - ccr.cc.handleResolvedAddrs(addrs, nil) -} - -// NewServiceConfig is called by the resolver implemenetion to send service -// configs to gRPC. 
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - select { - case <-ccr.done: - return - default: - } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - ccr.cc.handleServiceConfig(sc) -} - -func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) { - if len(addrs) == 0 && ccr.lastAddressesCount != 0 { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Resolver returns an empty address list", - Severity: channelz.CtWarning, - }) - } else if len(addrs) != 0 && ccr.lastAddressesCount == 0 { - var s string - for i, a := range addrs { - if a.ServerName != "" { - s += a.Addr + "(" + a.ServerName + ")" - } else { - s += a.Addr - } - if i != len(addrs)-1 { - s += " " - } - } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s), - Severity: channelz.CtINFO, - }) - } - ccr.lastAddressesCount = len(addrs) -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go deleted file mode 100644 index 8d0d3dc8c9..0000000000 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ /dev/null @@ -1,816 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math" - "net/url" - "strings" - "sync" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/encoding/proto" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -// Compressor defines the interface gRPC uses to compress a message. -// -// Deprecated: use package encoding. -type Compressor interface { - // Do compresses p into w. - Do(w io.Writer, p []byte) error - // Type returns the compression algorithm the Compressor uses. - Type() string -} - -type gzipCompressor struct { - pool sync.Pool -} - -// NewGZIPCompressor creates a Compressor based on GZIP. -// -// Deprecated: use package encoding/gzip. -func NewGZIPCompressor() Compressor { - c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) - return c -} - -// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead -// of assuming DefaultCompression. -// -// The error returned will be nil if the level is valid. -// -// Deprecated: use package encoding/gzip. 
-func NewGZIPCompressorWithLevel(level int) (Compressor, error) { - if level < gzip.DefaultCompression || level > gzip.BestCompression { - return nil, fmt.Errorf("grpc: invalid compression level: %d", level) - } - return &gzipCompressor{ - pool: sync.Pool{ - New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) - if err != nil { - panic(err) - } - return w - }, - }, - }, nil -} - -func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := c.pool.Get().(*gzip.Writer) - defer c.pool.Put(z) - z.Reset(w) - if _, err := z.Write(p); err != nil { - return err - } - return z.Close() -} - -func (c *gzipCompressor) Type() string { - return "gzip" -} - -// Decompressor defines the interface gRPC uses to decompress a message. -// -// Deprecated: use package encoding. -type Decompressor interface { - // Do reads the data from r and uncompress them. - Do(r io.Reader) ([]byte, error) - // Type returns the compression algorithm the Decompressor uses. - Type() string -} - -type gzipDecompressor struct { - pool sync.Pool -} - -// NewGZIPDecompressor creates a Decompressor based on GZIP. -// -// Deprecated: use package encoding/gzip. -func NewGZIPDecompressor() Decompressor { - return &gzipDecompressor{} -} - -func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - var z *gzip.Reader - switch maybeZ := d.pool.Get().(type) { - case nil: - newZ, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - z = newZ - case *gzip.Reader: - z = maybeZ - if err := z.Reset(r); err != nil { - d.pool.Put(z) - return nil, err - } - } - - defer func() { - z.Close() - d.pool.Put(z) - }() - return ioutil.ReadAll(z) -} - -func (d *gzipDecompressor) Type() string { - return "gzip" -} - -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - compressorType string - failFast bool - stream ClientStream - maxReceiveMessageSize *int - maxSendMessageSize *int - creds credentials.PerRPCCredentials - contentSubtype string - codec baseCodec - maxRetryRPCBufferSize int -} - -func defaultCallInfo() *callInfo { - return &callInfo{ - failFast: true, - maxRetryRPCBufferSize: 256 * 1024, // 256KB - } -} - -// CallOption configures a Call before it starts or extracts information from -// a Call after it completes. -type CallOption interface { - // before is called before the call is sent to any server. If before - // returns a non-nil error, the RPC fails with that error. - before(*callInfo) error - - // after is called after the call has completed. after cannot return an - // error, so any failures should be reported via output parameters. - after(*callInfo) -} - -// EmptyCallOption does not alter the Call configuration. -// It can be embedded in another structure to carry satellite data for use -// by interceptors. -type EmptyCallOption struct{} - -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} - -// Header returns a CallOptions that retrieves the header metadata -// for a unary RPC. -func Header(md *metadata.MD) CallOption { - return HeaderCallOption{HeaderAddr: md} -} - -// HeaderCallOption is a CallOption for collecting response header metadata. -// The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. 
-type HeaderCallOption struct { - HeaderAddr *metadata.MD -} - -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo) { - if c.stream != nil { - *o.HeaderAddr, _ = c.stream.Header() - } -} - -// Trailer returns a CallOptions that retrieves the trailer metadata -// for a unary RPC. -func Trailer(md *metadata.MD) CallOption { - return TrailerCallOption{TrailerAddr: md} -} - -// TrailerCallOption is a CallOption for collecting response trailer metadata. -// The metadata field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. -type TrailerCallOption struct { - TrailerAddr *metadata.MD -} - -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo) { - if c.stream != nil { - *o.TrailerAddr = c.stream.Trailer() - } -} - -// Peer returns a CallOption that retrieves peer information for a unary RPC. -// The peer field will be populated *after* the RPC completes. -func Peer(p *peer.Peer) CallOption { - return PeerCallOption{PeerAddr: p} -} - -// PeerCallOption is a CallOption for collecting the identity of the remote -// peer. The peer field will be populated *after* the RPC completes. -// This is an EXPERIMENTAL API. -type PeerCallOption struct { - PeerAddr *peer.Peer -} - -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo) { - if c.stream != nil { - if x, ok := peer.FromContext(c.stream.Context()); ok { - *o.PeerAddr = *x - } - } -} - -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. -// -// By default, RPCs don't "wait for ready". -func WaitForReady(waitForReady bool) CallOption { - return FailFastCallOption{FailFast: !waitForReady} -} - -// FailFast is the opposite of WaitForReady. -// -// Deprecated: use WaitForReady. -func FailFast(failFast bool) CallOption { - return FailFastCallOption{FailFast: failFast} -} - -// FailFastCallOption is a CallOption for indicating whether an RPC should fail -// fast or not. -// This is an EXPERIMENTAL API. -type FailFastCallOption struct { - FailFast bool -} - -func (o FailFastCallOption) before(c *callInfo) error { - c.failFast = o.FailFast - return nil -} -func (o FailFastCallOption) after(c *callInfo) {} - -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} -} - -// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. -// This is an EXPERIMENTAL API. -type MaxRecvMsgSizeCallOption struct { - MaxRecvMsgSize int -} - -func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { - c.maxReceiveMessageSize = &o.MaxRecvMsgSize - return nil -} -func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} - -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. 
-func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} -} - -// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. -// This is an EXPERIMENTAL API. -type MaxSendMsgSizeCallOption struct { - MaxSendMsgSize int -} - -func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { - c.maxSendMessageSize = &o.MaxSendMsgSize - return nil -} -func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} - -// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials -// for a call. -func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { - return PerRPCCredsCallOption{Creds: creds} -} - -// PerRPCCredsCallOption is a CallOption that indicates the per-RPC -// credentials to use for the call. -// This is an EXPERIMENTAL API. -type PerRPCCredsCallOption struct { - Creds credentials.PerRPCCredentials -} - -func (o PerRPCCredsCallOption) before(c *callInfo) error { - c.creds = o.Creds - return nil -} -func (o PerRPCCredsCallOption) after(c *callInfo) {} - -// UseCompressor returns a CallOption which sets the compressor used when -// sending the request. If WithCompressor is also set, UseCompressor has -// higher priority. -// -// This API is EXPERIMENTAL. -func UseCompressor(name string) CallOption { - return CompressorCallOption{CompressorType: name} -} - -// CompressorCallOption is a CallOption that indicates the compressor to use. -// This is an EXPERIMENTAL API. -type CompressorCallOption struct { - CompressorType string -} - -func (o CompressorCallOption) before(c *callInfo) error { - c.compressorType = o.CompressorType - return nil -} -func (o CompressorCallOption) after(c *callInfo) {} - -// CallContentSubtype returns a CallOption that will set the content-subtype -// for a call. For example, if content-subtype is "json", the Content-Type over -// the wire will be "application/grpc+json". The content-subtype is converted -// to lowercase before being included in Content-Type. See Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -// -// If CallCustomCodec is not also used, the content-subtype will be used to -// look up the Codec to use in the registry controlled by RegisterCodec. See -// the documentation on RegisterCodec for details on registration. The lookup -// of content-subtype is case-insensitive. If no such Codec is found, the call -// will result in an error with code codes.Internal. -// -// If CallCustomCodec is also used, that Codec will be used for all request and -// response messages, with the content-subtype set to the given contentSubtype -// here for requests. -func CallContentSubtype(contentSubtype string) CallOption { - return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} -} - -// ContentSubtypeCallOption is a CallOption that indicates the content-subtype -// used for marshaling messages. -// This is an EXPERIMENTAL API. -type ContentSubtypeCallOption struct { - ContentSubtype string -} - -func (o ContentSubtypeCallOption) before(c *callInfo) error { - c.contentSubtype = o.ContentSubtype - return nil -} -func (o ContentSubtypeCallOption) after(c *callInfo) {} - -// CallCustomCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. 
-// -// See Content-Type on -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. Also see the documentation on RegisterCodec and -// CallContentSubtype for more details on the interaction between Codec and -// content-subtype. -// -// This function is provided for advanced users; prefer to use only -// CallContentSubtype to select a registered codec instead. -func CallCustomCodec(codec Codec) CallOption { - return CustomCodecCallOption{Codec: codec} -} - -// CustomCodecCallOption is a CallOption that indicates the codec used for -// marshaling messages. -// This is an EXPERIMENTAL API. -type CustomCodecCallOption struct { - Codec Codec -} - -func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec - return nil -} -func (o CustomCodecCallOption) after(c *callInfo) {} - -// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory -// used for buffering this RPC's requests for retry purposes. -// -// This API is EXPERIMENTAL. -func MaxRetryRPCBufferSize(bytes int) CallOption { - return MaxRetryRPCBufferSizeCallOption{bytes} -} - -// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of -// memory to be used for caching this RPC for retry purposes. -// This is an EXPERIMENTAL API. -type MaxRetryRPCBufferSizeCallOption struct { - MaxRetryRPCBufferSize int -} - -func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { - c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize - return nil -} -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} - -// The format of the payload: compressed or not? -type payloadFormat uint8 - -const ( - compressionNone payloadFormat = 0 // no compression - compressionMade payloadFormat = 1 // compressed -) - -// parser reads complete gRPC messages from the underlying reader. -type parser struct { - // r is the underlying reader. - // See the comment on recvMsg for the permissible - // error types. - r io.Reader - - // The header of a gRPC message. Find more detail at - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md - header [5]byte -} - -// recvMsg reads a complete gRPC message from the stream. -// -// It returns the message and its payload (compression/encoding) -// format. The caller owns the returned msg memory. -// -// If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package -// No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible -// error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { - return 0, nil, err - } - - pf = payloadFormat(p.header[0]) - length := binary.BigEndian.Uint32(p.header[1:]) - - if length == 0 { - return pf, nil, nil - } - if int64(length) > int64(maxInt) { - return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) - } - if int(length) > maxReceiveMessageSize { - return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) - } - // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) - if _, err := p.r.Read(msg); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, nil, err - } - return pf, msg, nil -} - -// encode serializes msg and returns a buffer containing the message, or an -// error if it is too large to be transmitted by grpc. If msg is nil, it -// generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { - if msg == nil { // NOTE: typed nils will not be caught by this check - return nil, nil - } - b, err := c.Marshal(msg) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) - } - if uint(len(b)) > math.MaxUint32 { - return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) - } - return b, nil -} - -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. -// -// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - wrapErr := func(err error) error { - return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) - } - cbuf := &bytes.Buffer{} - if compressor != nil { - z, err := compressor.Compress(cbuf) - if err != nil { - return nil, wrapErr(err) - } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) - } - if err := z.Close(); err != nil { - return nil, wrapErr(err) - } - } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) - } - } - return cbuf.Bytes(), nil -} - -const ( - payloadLen = 1 - sizeLen = 4 - headerLen = payloadLen + sizeLen -) - -// msgHeader returns a 5-byte header for the message being transmitted and the -// payload, which is compData if non-nil or data otherwise. -func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { - hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData - } else { - hdr[0] = byte(compressionNone) - } - - // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data -} - -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { - return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, - } -} - -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { - switch pf { - case compressionNone: - case compressionMade: - if recvCompress == "" || recvCompress == encoding.Identity { - return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") - } - if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } - default: - return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) - } - return nil -} - -type payloadInfo struct { - wireLength int // The compressed length got from wire. 
- uncompressedBytes []byte -} - -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) - if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.wireLength = len(d) - } - - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() - } - - if pf == compressionMade { - // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, - // use this decompressor as the default. - if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } else { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - d, err = ioutil.ReadAll(dcReader) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } - } - if len(d) > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) - } - return d, nil -} - -// For the two compressor parameters, both should not be set, but if they are, -// dc takes precedence over compressor. -// TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) - if err != nil { - return err - } - if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) - } - if payInfo != nil { - payInfo.uncompressedBytes = d - } - return nil -} - -type rpcInfo struct { - failfast bool -} - -type rpcInfoContextKey struct{} - -func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { - return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) -} - -func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { - s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) - return -} - -// Code returns the error code for err if it was produced by the rpc system. -// Otherwise, it returns codes.Unknown. -// -// Deprecated: use status.Code instead. -func Code(err error) codes.Code { - return status.Code(err) -} - -// ErrorDesc returns the error description of err if it was produced by the rpc system. -// Otherwise, it returns err.Error() or empty string when err is nil. -// -// Deprecated: use status.Convert and Message method instead. -func ErrorDesc(err error) string { - return status.Convert(err).Message() -} - -// Errorf returns an error containing an error code and a description; -// Errorf returns nil if c is OK. -// -// Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { - return status.Errorf(c, format, a...) -} - -// toRPCErr converts an error into an error from the status package. 
-func toRPCErr(err error) error { - if err == nil || err == io.EOF { - return err - } - if err == io.ErrUnexpectedEOF { - return status.Error(codes.Internal, err.Error()) - } - if _, ok := status.FromError(err); ok { - return err - } - switch e := err.(type) { - case transport.ConnectionError: - return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } - } - return status.Error(codes.Unknown, err.Error()) -} - -// setCallInfoCodec should only be called after CallOptions have been applied. -func setCallInfoCodec(c *callInfo) error { - if c.codec != nil { - // codec was already set by a CallOption; use it. - return nil - } - - if c.contentSubtype == "" { - // No codec specified in CallOptions; use proto by default. - c.codec = encoding.GetCodec(proto.Name) - return nil - } - - // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) - if c.codec == nil { - return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) - } - return nil -} - -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - -// The SupportPackageIsVersion variables are referenced from generated protocol -// buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 5. -// -// Older versions are kept for compatibility. They may be removed if -// compatibility cannot be maintained. -// -// These constants should not be referenced from any other code. -const ( - SupportPackageIsVersion3 = true - SupportPackageIsVersion4 = true - SupportPackageIsVersion5 = true -) - -const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go deleted file mode 100644 index d705d7a80c..0000000000 --- a/vendor/google.golang.org/grpc/server.go +++ /dev/null @@ -1,1486 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/trace" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/encoding/proto" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/binarylog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "google.golang.org/grpc/tap" -) - -const ( - defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 - defaultServerMaxSendMessageSize = math.MaxInt32 -) - -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) - -// MethodDesc represents an RPC service's method specification. -type MethodDesc struct { - MethodName string - Handler methodHandler -} - -// ServiceDesc represents an RPC service's specification. -type ServiceDesc struct { - ServiceName string - // The pointer to the service interface. Used to check whether the user - // provided implementation satisfies the interface requirements. - HandlerType interface{} - Methods []MethodDesc - Streams []StreamDesc - Metadata interface{} -} - -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} -} - -// Server is a gRPC server to serve RPC requests. 
-type Server struct { - opts options - - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[io.Closer]bool - serve bool - drain bool - cv *sync.Cond // signaled when connections close for GracefulStop - m map[string]*service // service name -> service info - events trace.EventLog - - quit chan struct{} - done chan struct{} - quitOnce sync.Once - doneOnce sync.Once - channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - - channelzID int64 // channelz unique identification number - czData *channelzData -} - -type options struct { - creds credentials.TransportCredentials - codec baseCodec - cp Compressor - dc Decompressor - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - inTapHandle tap.ServerInHandle - statsHandler stats.Handler - maxConcurrentStreams uint32 - maxReceiveMessageSize int - maxSendMessageSize int - unknownStreamDesc *StreamDesc - keepaliveParams keepalive.ServerParameters - keepalivePolicy keepalive.EnforcementPolicy - initialWindowSize int32 - initialConnWindowSize int32 - writeBufferSize int - readBufferSize int - connectionTimeout time.Duration - maxHeaderListSize *uint32 -} - -var defaultServerOptions = options{ - maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, - maxSendMessageSize: defaultServerMaxSendMessageSize, - connectionTimeout: 120 * time.Second, - writeBufferSize: defaultWriteBufSize, - readBufferSize: defaultReadBufSize, -} - -// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. -type ServerOption func(*options) - -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. -// Note: A Send call may not directly translate to a write. -func WriteBufferSize(s int) ServerOption { - return func(o *options) { - o.writeBufferSize = s - } -} - -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. -func ReadBufferSize(s int) ServerOption { - return func(o *options) { - o.readBufferSize = s - } -} - -// InitialWindowSize returns a ServerOption that sets window size for stream. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func InitialWindowSize(s int32) ServerOption { - return func(o *options) { - o.initialWindowSize = s - } -} - -// InitialConnWindowSize returns a ServerOption that sets window size for a connection. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func InitialConnWindowSize(s int32) ServerOption { - return func(o *options) { - o.initialConnWindowSize = s - } -} - -// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. -func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - return func(o *options) { - o.keepaliveParams = kp - } -} - -// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. 
-func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { - return func(o *options) { - o.keepalivePolicy = kep - } -} - -// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. -// -// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. -func CustomCodec(codec Codec) ServerOption { - return func(o *options) { - o.codec = codec - } -} - -// RPCCompressor returns a ServerOption that sets a compressor for outbound -// messages. For backward compatibility, all outbound messages will be sent -// using this compressor, regardless of incoming message compression. By -// default, server messages will be sent using the same compressor with which -// request messages were sent. -// -// Deprecated: use encoding.RegisterCompressor instead. -func RPCCompressor(cp Compressor) ServerOption { - return func(o *options) { - o.cp = cp - } -} - -// RPCDecompressor returns a ServerOption that sets a decompressor for inbound -// messages. It has higher priority than decompressors registered via -// encoding.RegisterCompressor. -// -// Deprecated: use encoding.RegisterCompressor instead. -func RPCDecompressor(dc Decompressor) ServerOption { - return func(o *options) { - o.dc = dc - } -} - -// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default limit. -// -// Deprecated: use MaxRecvMsgSize instead. -func MaxMsgSize(m int) ServerOption { - return MaxRecvMsgSize(m) -} - -// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. -// If this is not set, gRPC uses the default 4MB. -func MaxRecvMsgSize(m int) ServerOption { - return func(o *options) { - o.maxReceiveMessageSize = m - } -} - -// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. -// If this is not set, gRPC uses the default 4MB. -func MaxSendMsgSize(m int) ServerOption { - return func(o *options) { - o.maxSendMessageSize = m - } -} - -// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number -// of concurrent streams to each ServerTransport. -func MaxConcurrentStreams(n uint32) ServerOption { - return func(o *options) { - o.maxConcurrentStreams = n - } -} - -// Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.TransportCredentials) ServerOption { - return func(o *options) { - o.creds = c - } -} - -// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the -// server. Only one unary interceptor can be installed. The construction of multiple -// interceptors (e.g., chaining) can be implemented at the caller. -func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return func(o *options) { - if o.unaryInt != nil { - panic("The unary server interceptor was already set and may not be reset.") - } - o.unaryInt = i - } -} - -// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the -// server. Only one stream interceptor can be installed. -func StreamInterceptor(i StreamServerInterceptor) ServerOption { - return func(o *options) { - if o.streamInt != nil { - panic("The stream server interceptor was already set and may not be reset.") - } - o.streamInt = i - } -} - -// InTapHandle returns a ServerOption that sets the tap handle for all the server -// transport to be created. Only one can be installed. 
-func InTapHandle(h tap.ServerInHandle) ServerOption { - return func(o *options) { - if o.inTapHandle != nil { - panic("The tap handle was already set and may not be reset.") - } - o.inTapHandle = h - } -} - -// StatsHandler returns a ServerOption that sets the stats handler for the server. -func StatsHandler(h stats.Handler) ServerOption { - return func(o *options) { - o.statsHandler = h - } -} - -// UnknownServiceHandler returns a ServerOption that allows for adding a custom -// unknown service handler. The provided method is a bidi-streaming RPC service -// handler that will be invoked instead of returning the "unimplemented" gRPC -// error whenever a request is received for an unregistered service or method. -// The handling function has full access to the Context of the request and the -// stream, and the invocation bypasses interceptors. -func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { - return func(o *options) { - o.unknownStreamDesc = &StreamDesc{ - StreamName: "unknown_service_handler", - Handler: streamHandler, - // We need to assume that the users of the streamHandler will want to use both. - ClientStreams: true, - ServerStreams: true, - } - } -} - -// ConnectionTimeout returns a ServerOption that sets the timeout for -// connection establishment (up to and including HTTP/2 handshaking) for all -// new connections. If this is not set, the default is 120 seconds. A zero or -// negative value will result in an immediate timeout. -// -// This API is EXPERIMENTAL. -func ConnectionTimeout(d time.Duration) ServerOption { - return func(o *options) { - o.connectionTimeout = d - } -} - -// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size -// of header list that the server is prepared to accept. -func MaxHeaderListSize(s uint32) ServerOption { - return func(o *options) { - o.maxHeaderListSize = &s - } -} - -// NewServer creates a gRPC server which has no service registered and has not -// started to accept requests yet. -func NewServer(opt ...ServerOption) *Server { - opts := defaultServerOptions - for _, o := range opt { - o(&opts) - } - s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[io.Closer]bool), - m: make(map[string]*service), - quit: make(chan struct{}), - done: make(chan struct{}), - czData: new(channelzData), - } - s.cv = sync.NewCond(&s.mu) - if EnableTracing { - _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) - } - - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } - return s -} - -// printf records an event in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { - if s.events != nil { - s.events.Printf(format, a...) - } -} - -// errorf records an error in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { - if s.events != nil { - s.events.Errorf(format, a...) - } -} - -// RegisterService registers a service and its implementation to the gRPC -// server. It is called from the IDL generated code. This must be called before -// invoking Serve. 
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) - } - s.register(sd, ss) -} - -func (s *Server) register(sd *ServiceDesc, ss interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - s.printf("RegisterService(%q)", sd.ServiceName) - if s.serve { - grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) - } - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, - } - for i := range sd.Methods { - d := &sd.Methods[i] - srv.md[d.MethodName] = d - } - for i := range sd.Streams { - d := &sd.Streams[i] - srv.sd[d.StreamName] = d - } - s.m[sd.ServiceName] = srv -} - -// MethodInfo contains the information of an RPC including its method name and type. -type MethodInfo struct { - // Name is the method name only, without the service name or package name. - Name string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. -type ServiceInfo struct { - Methods []MethodInfo - // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} -} - -// GetServiceInfo returns a map from service names to ServiceInfo. -// Service names include the package names, in the form of .. -func (s *Server) GetServiceInfo() map[string]ServiceInfo { - ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: false, - IsServerStream: false, - }) - } - for m, d := range srv.sd { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: d.ClientStreams, - IsServerStream: d.ServerStreams, - }) - } - - ret[n] = ServiceInfo{ - Methods: methods, - Metadata: srv.mdata, - } - } - return ret -} - -// ErrServerStopped indicates that the operation is now illegal because of -// the server being stopped. -var ErrServerStopped = errors.New("grpc: the server has been stopped") - -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - -type listenSocket struct { - net.Listener - channelzID int64 -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } -} - -func (l *listenSocket) Close() error { - err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } - return err -} - -// Serve accepts incoming connections on the listener lis, creating a new -// ServerTransport and service goroutine for each. The service goroutines -// read gRPC requests and then call the registered handlers to reply to them. -// Serve returns when lis.Accept fails with fatal errors. 
lis will be closed when -// this method returns. -// Serve will return a non-nil error unless Stop or GracefulStop is called. -func (s *Server) Serve(lis net.Listener) error { - s.mu.Lock() - s.printf("serving") - s.serve = true - if s.lis == nil { - // Serve called after Stop or GracefulStop. - s.mu.Unlock() - lis.Close() - return ErrServerStopped - } - - s.serveWG.Add(1) - defer func() { - s.serveWG.Done() - select { - // Stop or GracefulStop called; block until done and return nil. - case <-s.quit: - <-s.done - default: - } - }() - - ls := &listenSocket{Listener: lis} - s.lis[ls] = true - - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - - defer func() { - s.mu.Lock() - if s.lis != nil && s.lis[ls] { - ls.Close() - delete(s.lis, ls) - } - s.mu.Unlock() - }() - - var tempDelay time.Duration // how long to sleep on accept failure - - for { - rawConn, err := lis.Accept() - if err != nil { - if ne, ok := err.(interface { - Temporary() bool - }); ok && ne.Temporary() { - if tempDelay == 0 { - tempDelay = 5 * time.Millisecond - } else { - tempDelay *= 2 - } - if max := 1 * time.Second; tempDelay > max { - tempDelay = max - } - s.mu.Lock() - s.printf("Accept error: %v; retrying in %v", err, tempDelay) - s.mu.Unlock() - timer := time.NewTimer(tempDelay) - select { - case <-timer.C: - case <-s.quit: - timer.Stop() - return nil - } - continue - } - s.mu.Lock() - s.printf("done serving; Accept = %v", err) - s.mu.Unlock() - - select { - case <-s.quit: - return nil - default: - } - return err - } - tempDelay = 0 - // Start a new goroutine to deal with rawConn so we don't stall this Accept - // loop goroutine. - // - // Make sure we account for the goroutine so GracefulStop doesn't nil out - // s.conns before this conn can be added. - s.serveWG.Add(1) - go func() { - s.handleRawConn(rawConn) - s.serveWG.Done() - }() - } -} - -// handleRawConn forks a goroutine to handle a just-accepted connection that -// has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { - rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandshake returns ErrConnDispatched, keep rawConn open. - if err != credentials.ErrConnDispatched { - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } - - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - conn.Close() - return - } - s.mu.Unlock() - - // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) - if st == nil { - return - } - - rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { - return - } - go func() { - s.serveStreams(st) - s.removeConn(st) - }() -} - -// newHTTP2Transport sets up a http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go). 
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { - config := &transport.ServerConfig{ - MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, - InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, - KeepaliveParams: s.opts.keepaliveParams, - KeepalivePolicy: s.opts.keepalivePolicy, - InitialWindowSize: s.opts.initialWindowSize, - InitialConnWindowSize: s.opts.initialConnWindowSize, - WriteBufferSize: s.opts.writeBufferSize, - ReadBufferSize: s.opts.readBufferSize, - ChannelzParentID: s.channelzID, - MaxHeaderListSize: s.opts.maxHeaderListSize, - } - st, err := transport.NewServerTransport("http2", c, config) - if err != nil { - s.mu.Lock() - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) - return nil - } - - return st -} - -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) - }) - wg.Wait() -} - -var _ http.Handler = (*Server)(nil) - -// ServeHTTP implements the Go standard library's http.Handler -// interface by responding to the gRPC request r, by looking up -// the requested gRPC method in the gRPC server s. -// -// The provided HTTP request must have arrived on an HTTP/2 -// connection. When using the Go standard library's server, -// practically this means that the Request must also have arrived -// over TLS. -// -// To share one port (such as 443 for https) between gRPC and an -// existing http.Handler, use a root http.Handler such as: -// -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } -// -// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally -// separate from grpc-go's HTTP/2 server. Performance and features may vary -// between the two paths. ServeHTTP does not support some gRPC features -// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL -// and subject to change. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if !s.addConn(st) { - return - } - defer s.removeConn(st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - return trInfo -} - -func (s *Server) addConn(c io.Closer) bool { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns == nil { - c.Close() - return false - } - if s.drain { - // Transport added after we drained our existing conns: drain it - // immediately. - c.(transport.ServerTransport).Drain() - } - s.conns[c] = true - return true -} - -func (s *Server) removeConn(c io.Closer) { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, c) - s.cv.Broadcast() - } -} - -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), - } -} - -func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) -} - -func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) -} - -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { - data, err := encode(s.getCodec(stream.ContentSubtype()), msg) - if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) - return err - } - compData, err := compress(data, cp, comp) - if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) - return err - } - hdr, payload := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) - } - err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) - } - return err -} - -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if channelz.IsOn() { - s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() - } - sh := s.opts.statsHandler - if sh != nil { - beginTime := time.Now() - begin := &stats.Begin{ - BeginTime: beginTime, - } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ - BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - sh.HandleRPC(stream.Context(), end) - }() - } - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.firstLine.client = false - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - }() - } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { - ctx := stream.Context() - md, _ := metadata.FromIncomingContext(ctx) - logEntry := &binarylog.ClientHeader{ - Header: md, - MethodName: stream.Method(), - PeerAddr: nil, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = deadline.Sub(time.Now()) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - if a := md[":authority"]; len(a) > 0 { - logEntry.Authority = a[0] - } - if peer, ok := peer.FromContext(ctx); ok { - logEntry.PeerAddr = peer.Addr - } - binlog.Log(logEntry) - } - - // comp and cp are used for compression. decomp and dc are used for - // decompression. If comp and decomp are both set, they are the same; - // however they are kept separate to ensure that at most one of the - // compressor/decompressor variable pairs are set for use later. - var comp, decomp encoding.Compressor - var cp Compressor - var dc Decompressor - - // If dc is set and matches the stream's compression, use it. Otherwise, try - // to find a matching registered compressor for decomp. - if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - dc = s.opts.dc - } else if rc != "" && rc != encoding.Identity { - decomp = encoding.GetCompressor(rc) - if decomp == nil { - st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) - return st.Err() - } - } - - // If cp is set, use it. Otherwise, attempt to compress the response using - // the incoming message compression method. - // - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - if s.opts.cp != nil { - cp = s.opts.cp - stream.SetSendCompress(cp.Type()) - } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { - // Legacy compressor not specified; attempt to respond with same encoding. 
- comp = encoding.GetCompressor(rc) - if comp != nil { - stream.SetSendCompress(rc) - } - } - - var payInfo *payloadInfo - if sh != nil || binlog != nil { - payInfo = &payloadInfo{} - } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) - if err != nil { - if st, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } - } - return err - } - if channelz.IsOn() { - t.IncrMsgRecv() - } - df := func(v interface{}) error { - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { - return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) - } - if sh != nil { - sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - Data: d, - Length: len(d), - }) - } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ - Message: d, - }) - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) - reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - trInfo.tr.SetError() - } - if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - if binlog != nil { - if h, _ := stream.Header(); h.Len() > 0 { - // Only log serverHeader if there was header. Otherwise it can - // be trailer only. - binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - } - binlog.Log(&binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - }) - } - return appErr - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{Last: true} - - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) - } - } else { - switch st := err.(type) { - case transport.ConnectionError: - // Nothing to do here. - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) - } - } - if binlog != nil { - h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - }) - } - return err - } - if binlog != nil { - h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ - Message: reply, - }) - } - if channelz.IsOn() { - t.IncrMsgSent() - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - // TODO: Should we be logging if writing status failed here, like above? - // Should the logging be in WriteStatus? Should we ignore the WriteStatus - // error or allow the stats handler to see it? 
- err = t.WriteStatus(stream, status.New(codes.OK, "")) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ - Trailer: stream.Trailer(), - Err: appErr, - }) - } - return err -} - -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { - if channelz.IsOn() { - s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() - } - sh := s.opts.statsHandler - if sh != nil { - beginTime := time.Now() - begin := &stats.Begin{ - BeginTime: beginTime, - } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ - BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - sh.HandleRPC(stream.Context(), end) - }() - } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) - ss := &serverStream{ - ctx: ctx, - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.getCodec(stream.ContentSubtype()), - maxReceiveMessageSize: s.opts.maxReceiveMessageSize, - maxSendMessageSize: s.opts.maxSendMessageSize, - trInfo: trInfo, - statsHandler: sh, - } - - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { - md, _ := metadata.FromIncomingContext(ctx) - logEntry := &binarylog.ClientHeader{ - Header: md, - MethodName: stream.Method(), - PeerAddr: nil, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = deadline.Sub(time.Now()) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - if a := md[":authority"]; len(a) > 0 { - logEntry.Authority = a[0] - } - if peer, ok := peer.FromContext(ss.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - ss.binlog.Log(logEntry) - } - - // If dc is set and matches the stream's compression, use it. Otherwise, try - // to find a matching registered compressor for decomp. - if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - ss.dc = s.opts.dc - } else if rc != "" && rc != encoding.Identity { - ss.decomp = encoding.GetCompressor(rc) - if ss.decomp == nil { - st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) - return st.Err() - } - } - - // If cp is set, use it. Otherwise, attempt to compress the response using - // the incoming message compression method. - // - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - if s.opts.cp != nil { - ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) - } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { - // Legacy compressor not specified; attempt to respond with same encoding. 
- ss.comp = encoding.GetCompressor(rc) - if ss.comp != nil { - stream.SetSendCompress(rc) - } - } - - if trInfo != nil { - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() - } - var appErr error - var server interface{} - if srv != nil { - server = srv.server - } - if s.opts.streamInt == nil { - appErr = sd.Handler(server, ss) - } else { - info := &StreamServerInfo{ - FullMethod: stream.Method(), - IsClientStream: sd.ClientStreams, - IsServerStream: sd.ServerStreams, - } - appErr = s.opts.streamInt(server, ss, info, sd.Handler) - } - if appErr != nil { - appStatus, ok := status.FromError(appErr) - if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) - appErr = appStatus.Err() - } - if trInfo != nil { - ss.mu.Lock() - ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) - ss.trInfo.tr.SetError() - ss.mu.Unlock() - } - t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ - Trailer: ss.s.Trailer(), - Err: appErr, - }) - } - // TODO: Should we log an error from WriteStatus here and below? - return appErr - } - if trInfo != nil { - ss.mu.Lock() - ss.trInfo.tr.LazyLog(stringer("OK"), false) - ss.mu.Unlock() - } - err = t.WriteStatus(ss.s, status.New(codes.OK, "")) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ - Trailer: ss.s.Trailer(), - Err: appErr, - }) - } - return err -} - -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { - sm := stream.Method() - if sm != "" && sm[0] == '/' { - sm = sm[1:] - } - pos := strings.LastIndex(sm, "/") - if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() - } - errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - service := sm[:pos] - method := sm[pos+1:] - - if srv, ok := s.m[service]; ok { - if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) - return - } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return - } - } - // Unknown service, or known server unknown method. - if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) - return - } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) - trInfo.tr.SetError() - } - errDesc := fmt.Sprintf("unknown service %v", service) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } -} - -// The key to save ServerTransportStream in the context. 
-type streamKey struct{} - -// NewContextWithServerTransportStream creates a new context from ctx and -// attaches stream to it. -// -// This API is EXPERIMENTAL. -func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// ServerTransportStream is a minimal interface that a transport stream must -// implement. This can be used to mock an actual transport stream for tests of -// handler code that use, for example, grpc.SetHeader (which requires some -// stream to be in context). -// -// See also NewContextWithServerTransportStream. -// -// This API is EXPERIMENTAL. -type ServerTransportStream interface { - Method() string - SetHeader(md metadata.MD) error - SendHeader(md metadata.MD) error - SetTrailer(md metadata.MD) error -} - -// ServerTransportStreamFromContext returns the ServerTransportStream saved in -// ctx. Returns nil if the given context has no stream associated with it -// (which implies it is not an RPC invocation context). -// -// This API is EXPERIMENTAL. -func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { - s, _ := ctx.Value(streamKey{}).(ServerTransportStream) - return s -} - -// Stop stops the gRPC server. It immediately closes all open -// connections and listeners. -// It cancels all active RPCs on the server side and the corresponding -// pending RPCs on the client side will get notified by connection -// errors. -func (s *Server) Stop() { - s.quitOnce.Do(func() { - close(s.quit) - }) - - defer func() { - s.serveWG.Wait() - s.doneOnce.Do(func() { - close(s.done) - }) - }() - - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) - - s.mu.Lock() - listeners := s.lis - s.lis = nil - st := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() - s.mu.Unlock() - - for lis := range listeners { - lis.Close() - } - for c := range st { - c.Close() - } - - s.mu.Lock() - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() -} - -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quitOnce.Do(func() { - close(s.quit) - }) - - defer func() { - s.doneOnce.Do(func() { - close(s.done) - }) - }() - - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return - } - - for lis := range s.lis { - lis.Close() - } - s.lis = nil - if !s.drain { - for c := range s.conns { - c.(transport.ServerTransport).Drain() - } - s.drain = true - } - - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() -} - -// contentSubtype must be lowercase -// cannot return nil -func (s *Server) getCodec(contentSubtype string) baseCodec { - if s.opts.codec != nil { - return s.opts.codec - } - if contentSubtype == "" { - return encoding.GetCodec(proto.Name) - } - codec := encoding.GetCodec(contentSubtype) - if codec == nil { - return encoding.GetCodec(proto.Name) - } - return codec -} - -// SetHeader sets the header metadata. 
-// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). -func SetHeader(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream := ServerTransportStreamFromContext(ctx) - if stream == nil { - return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetHeader(md) -} - -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. -func SendHeader(ctx context.Context, md metadata.MD) error { - stream := ServerTransportStreamFromContext(ctx) - if stream == nil { - return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - if err := stream.SendHeader(md); err != nil { - return toRPCErr(err) - } - return nil -} - -// SetTrailer sets the trailer metadata that will be sent when an RPC returns. -// When called more than once, all the provided metadata will be merged. -func SetTrailer(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream := ServerTransportStreamFromContext(ctx) - if stream == nil { - return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetTrailer(md) -} - -// Method returns the method string for the server context. The returned -// string is in the format of "/service/method". -func Method(ctx context.Context) (string, bool) { - s := ServerTransportStreamFromContext(ctx) - if s == nil { - return "", false - } - return s.Method(), true -} - -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go deleted file mode 100644 index 162857e204..0000000000 --- a/vendor/google.golang.org/grpc/service_config.go +++ /dev/null @@ -1,372 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" -) - -const maxInt = int(^uint(0) >> 1) - -// MethodConfig defines the configuration recommended by the service providers for a -// particular method. -// -// Deprecated: Users should not use this struct. Service config should be received -// through name resolver, as specified here -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). 
The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minimum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int - // RetryPolicy configures retry options for the method. - retryPolicy *retryPolicy -} - -// ServiceConfig is provided by the service provider and contains parameters for how -// clients that connect to the service should behave. -// -// Deprecated: Users should not use this struct. Service config should be received -// through name resolver, as specified here -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -type ServiceConfig struct { - // LB is the load balancer the service providers recommends. The balancer specified - // via grpc.WithBalancer will override this. - LB *string - - // Methods contains a map for the methods in this service. If there is an - // exact match for a method (i.e. /service/method) in the map, use the - // corresponding MethodConfig. If there's no exact match, look for the - // default config for the service (/service/) and use the corresponding - // MethodConfig if it exists. Otherwise, the method has no MethodConfig to - // use. - Methods map[string]MethodConfig - - // If a retryThrottlingPolicy is provided, gRPC will automatically throttle - // retry attempts and hedged RPCs when the client’s ratio of failures to - // successes exceeds a threshold. - // - // For each server name, the gRPC client will maintain a token_count which is - // initially set to maxTokens, and can take values between 0 and maxTokens. - // - // Every outgoing RPC (regardless of service or method invoked) will change - // token_count as follows: - // - // - Every failed RPC will decrement the token_count by 1. - // - Every successful RPC will increment the token_count by tokenRatio. - // - // If token_count is less than or equal to maxTokens / 2, then RPCs will not - // be retried and hedged RPCs will not be sent. - retryThrottling *retryThrottlingPolicy - // healthCheckConfig must be set as one of the requirement to enable LB channel - // health check. - healthCheckConfig *healthCheckConfig -} - -// healthCheckConfig defines the go-native version of the LB channel health check config. -type healthCheckConfig struct { - // serviceName is the service name to use in the health-checking request. 
- ServiceName string -} - -// retryPolicy defines the go-native version of the retry policy defined by the -// service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryPolicy struct { - // MaxAttempts is the maximum number of attempts, including the original RPC. - // - // This field is required and must be two or greater. - maxAttempts int - - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at - // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). - // - // These fields are required and must be greater than zero. - initialBackoff time.Duration - maxBackoff time.Duration - backoffMultiplier float64 - - // The set of status codes which may be retried. - // - // Status codes are specified as strings, e.g., "UNAVAILABLE". - // - // This field is required and must be non-empty. - // Note: a set is used to store this for easy lookup. - retryableStatusCodes map[codes.Code]bool -} - -type jsonRetryPolicy struct { - MaxAttempts int - InitialBackoff string - MaxBackoff string - BackoffMultiplier float64 - RetryableStatusCodes []codes.Code -} - -// retryThrottlingPolicy defines the go-native version of the retry throttling -// policy defined by the service config here: -// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config -type retryThrottlingPolicy struct { - // The number of tokens starts at maxTokens. The token_count will always be - // between 0 and maxTokens. - // - // This field is required and must be greater than zero. - MaxTokens float64 - // The amount of tokens to add on each successful RPC. Typically this will - // be some number between 0 and 1, e.g., 0.1. - // - // This field is required and must be greater than zero. Up to 3 decimal - // places are supported. - TokenRatio float64 -} - -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - -type jsonName struct { - Service *string - Method *string -} - -func (j jsonName) generatePath() (string, bool) { - if j.Service == nil { - return "", false - } - res := "/" + *j.Service + "/" - if j.Method != nil { - res += *j.Method - } - return res, true -} - -// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
-type jsonMC struct { - Name *[]jsonName - WaitForReady *bool - Timeout *string - MaxRequestMessageBytes *int64 - MaxResponseMessageBytes *int64 - RetryPolicy *jsonRetryPolicy -} - -// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. -type jsonSC struct { - LoadBalancingPolicy *string - MethodConfig *[]jsonMC - RetryThrottling *retryThrottlingPolicy - HealthCheckConfig *healthCheckConfig -} - -func parseServiceConfig(js string) (ServiceConfig, error) { - if len(js) == 0 { - return ServiceConfig{}, fmt.Errorf("no JSON service config provided") - } - var rsc jsonSC - err := json.Unmarshal([]byte(js), &rsc) - if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return ServiceConfig{}, err - } - sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, - Methods: make(map[string]MethodConfig), - retryThrottling: rsc.RetryThrottling, - healthCheckConfig: rsc.HealthCheckConfig, - } - if rsc.MethodConfig == nil { - return sc, nil - } - - for _, m := range *rsc.MethodConfig { - if m.Name == nil { - continue - } - d, err := parseDuration(m.Timeout) - if err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return ServiceConfig{}, err - } - - mc := MethodConfig{ - WaitForReady: m.WaitForReady, - Timeout: d, - } - if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return ServiceConfig{}, err - } - if m.MaxRequestMessageBytes != nil { - if *m.MaxRequestMessageBytes > int64(maxInt) { - mc.MaxReqSize = newInt(maxInt) - } else { - mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) - } - } - if m.MaxResponseMessageBytes != nil { - if *m.MaxResponseMessageBytes > int64(maxInt) { - mc.MaxRespSize = newInt(maxInt) - } else { - mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) - } - } - for _, n := range *m.Name { - if path, valid := n.generatePath(); valid { - sc.Methods[path] = mc - } - } - } - - if sc.retryThrottling != nil { - if sc.retryThrottling.MaxTokens <= 0 || - sc.retryThrottling.MaxTokens >= 1000 || - sc.retryThrottling.TokenRatio <= 0 { - // Illegal throttling config; disable throttling. - sc.retryThrottling = nil - } - } - return sc, nil -} - -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { - if jrp == nil { - return nil, nil - } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } - - if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil - } - - rp := &retryPolicy{ - maxAttempts: jrp.MaxAttempts, - initialBackoff: *ib, - maxBackoff: *mb, - backoffMultiplier: jrp.BackoffMultiplier, - retryableStatusCodes: make(map[codes.Code]bool), - } - if rp.maxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. 
- rp.maxAttempts = 5 - } - for _, code := range jrp.RetryableStatusCodes { - rp.retryableStatusCodes[code] = true - } - return rp, nil -} - -func min(a, b *int) *int { - if *a < *b { - return a - } - return b -} - -func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { - if mcMax == nil && doptMax == nil { - return &defaultVal - } - if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) - } - if mcMax != nil { - return mcMax - } - return doptMax -} - -func newInt(b int) *int { - return &b -} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go deleted file mode 100644 index dc03731e45..0000000000 --- a/vendor/google.golang.org/grpc/stats/handlers.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package stats - -import ( - "context" - "net" -) - -// ConnTagInfo defines the relevant information needed by connection context tagger. -type ConnTagInfo struct { - // RemoteAddr is the remote address of the corresponding connection. - RemoteAddr net.Addr - // LocalAddr is the local address of the corresponding connection. - LocalAddr net.Addr -} - -// RPCTagInfo defines the relevant information needed by RPC context tagger. -type RPCTagInfo struct { - // FullMethodName is the RPC method in the format of /package.service/method. - FullMethodName string - // FailFast indicates if this RPC is failfast. - // This field is only valid on client side, it's always false on server side. - FailFast bool -} - -// Handler defines the interface for the related stats handling (e.g., RPCs, connections). -type Handler interface { - // TagRPC can attach some information to the given context. - // The context used for the rest lifetime of the RPC will be derived from - // the returned context. - TagRPC(context.Context, *RPCTagInfo) context.Context - // HandleRPC processes the RPC stats. - HandleRPC(context.Context, RPCStats) - - // TagConn can attach some information to the given context. - // The returned context will be used for stats handling. - // For conn stats handling, the context used in HandleConn for this - // connection will be derived from the context returned. - // For RPC stats handling, - // - On server side, the context used in HandleRPC for all RPCs on this - // connection will be derived from the context returned. - // - On client side, the context is not derived from the context returned. - TagConn(context.Context, *ConnTagInfo) context.Context - // HandleConn processes the Conn stats. - HandleConn(context.Context, ConnStats) -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go deleted file mode 100644 index 84f77dafa5..0000000000 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ /dev/null @@ -1,295 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto - -// Package stats is for collecting and reporting various network and RPC stats. -// This package is for monitoring purpose only. All fields are read-only. -// All APIs are experimental. -package stats // import "google.golang.org/grpc/stats" - -import ( - "context" - "net" - "time" -) - -// RPCStats contains stats information about RPCs. -type RPCStats interface { - isRPCStats() - // IsClient returns true if this RPCStats is from client side. - IsClient() bool -} - -// Begin contains stats when an RPC begins. -// FailFast is only valid if this Begin is from client side. -type Begin struct { - // Client is true if this Begin is from client side. - Client bool - // BeginTime is the time when the RPC begins. - BeginTime time.Time - // FailFast indicates if this RPC is failfast. - FailFast bool -} - -// IsClient indicates if the stats information is from client side. -func (s *Begin) IsClient() bool { return s.Client } - -func (s *Begin) isRPCStats() {} - -// InPayload contains the information for an incoming payload. -type InPayload struct { - // Client is true if this InPayload is from client side. - Client bool - // Payload is the payload with original type. - Payload interface{} - // Data is the serialized message payload. - Data []byte - // Length is the length of uncompressed data. - Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). - WireLength int - // RecvTime is the time when the payload is received. - RecvTime time.Time -} - -// IsClient indicates if the stats information is from client side. -func (s *InPayload) IsClient() bool { return s.Client } - -func (s *InPayload) isRPCStats() {} - -// InHeader contains stats when a header is received. -type InHeader struct { - // Client is true if this InHeader is from client side. - Client bool - // WireLength is the wire length of header. - WireLength int - - // The following fields are valid only if Client is false. - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // RemoteAddr is the remote address of the corresponding connection. - RemoteAddr net.Addr - // LocalAddr is the local address of the corresponding connection. - LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string -} - -// IsClient indicates if the stats information is from client side. -func (s *InHeader) IsClient() bool { return s.Client } - -func (s *InHeader) isRPCStats() {} - -// InTrailer contains stats when a trailer is received. -type InTrailer struct { - // Client is true if this InTrailer is from client side. - Client bool - // WireLength is the wire length of trailer. - WireLength int -} - -// IsClient indicates if the stats information is from client side. 
-func (s *InTrailer) IsClient() bool { return s.Client } - -func (s *InTrailer) isRPCStats() {} - -// OutPayload contains the information for an outgoing payload. -type OutPayload struct { - // Client is true if this OutPayload is from client side. - Client bool - // Payload is the payload with original type. - Payload interface{} - // Data is the serialized message payload. - Data []byte - // Length is the length of uncompressed data. - Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). - WireLength int - // SentTime is the time when the payload is sent. - SentTime time.Time -} - -// IsClient indicates if this stats information is from client side. -func (s *OutPayload) IsClient() bool { return s.Client } - -func (s *OutPayload) isRPCStats() {} - -// OutHeader contains stats when a header is sent. -type OutHeader struct { - // Client is true if this OutHeader is from client side. - Client bool - - // The following fields are valid only if Client is true. - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // RemoteAddr is the remote address of the corresponding connection. - RemoteAddr net.Addr - // LocalAddr is the local address of the corresponding connection. - LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string -} - -// IsClient indicates if this stats information is from client side. -func (s *OutHeader) IsClient() bool { return s.Client } - -func (s *OutHeader) isRPCStats() {} - -// OutTrailer contains stats when a trailer is sent. -type OutTrailer struct { - // Client is true if this OutTrailer is from client side. - Client bool - // WireLength is the wire length of trailer. - WireLength int -} - -// IsClient indicates if this stats information is from client side. -func (s *OutTrailer) IsClient() bool { return s.Client } - -func (s *OutTrailer) isRPCStats() {} - -// End contains stats when an RPC ends. -type End struct { - // Client is true if this End is from client side. - Client bool - // BeginTime is the time when the RPC began. - BeginTime time.Time - // EndTime is the time when the RPC ends. - EndTime time.Time - // Error is the error the RPC ended with. It is an error generated from - // status.Status and can be converted back to status.Status using - // status.FromError if non-nil. - Error error -} - -// IsClient indicates if this is from client side. -func (s *End) IsClient() bool { return s.Client } - -func (s *End) isRPCStats() {} - -// ConnStats contains stats information about connections. -type ConnStats interface { - isConnStats() - // IsClient returns true if this ConnStats is from client side. - IsClient() bool -} - -// ConnBegin contains the stats of a connection when it is established. -type ConnBegin struct { - // Client is true if this ConnBegin is from client side. - Client bool -} - -// IsClient indicates if this is from client side. -func (s *ConnBegin) IsClient() bool { return s.Client } - -func (s *ConnBegin) isConnStats() {} - -// ConnEnd contains the stats of a connection when it ends. -type ConnEnd struct { - // Client is true if this ConnEnd is from client side. - Client bool -} - -// IsClient indicates if this is from client side. -func (s *ConnEnd) IsClient() bool { return s.Client } - -func (s *ConnEnd) isConnStats() {} - -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - -// SetTags attaches stats tagging data to the context, which will be sent in -// the outgoing RPC with the header grpc-tags-bin. 
Subsequent calls to -// SetTags will overwrite the values from earlier calls. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) -} - -// Tags returns the tags from the context for the inbound RPC. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b -} - -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - -// SetTrace attaches stats tagging data to the context, which will be sent in -// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to -// SetTrace will overwrite the values from earlier calls. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) -} - -// Trace returns the trace from the context for the inbound RPC. -// -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. -func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b -} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go deleted file mode 100644 index ed36681bb5..0000000000 --- a/vendor/google.golang.org/grpc/status/status.go +++ /dev/null @@ -1,210 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package status implements errors returned by gRPC. These errors are -// serialized and transmitted on the wire between server and client, and allow -// for additional data to be transmitted via the Details field in the status -// proto. gRPC service handlers should return an error created by this -// package, and gRPC clients should expect a corresponding error to be -// returned from the RPC call. -// -// This package upholds the invariants that a non-nil error may not -// contain an OK code, and an OK code must result in a nil error. -package status - -import ( - "context" - "errors" - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc/codes" -) - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. -type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} - -// New returns a Status representing c and msg. -func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} -} - -// Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { - return New(c, fmt.Sprintf(format, a...)) -} - -// Error returns an error representing c and msg. If c is OK, returns nil. -func Error(c codes.Code, msg string) error { - return New(c, msg).Err() -} - -// Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { - return Error(c, fmt.Sprintf(format, a...)) -} - -// ErrorProto returns an error representing s. If s.Code is OK, returns nil. -func ErrorProto(s *spb.Status) error { - return FromProto(s).Err() -} - -// FromProto returns a Status representing s. 
-func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} -} - -// FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. -func FromError(err error) (s *Status, ok bool) { - if err == nil { - return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true - } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true - } - return New(codes.Unknown, err.Error()), false -} - -// Convert is a convenience function which removes the need to handle the -// boolean return value from FromError. -func Convert(err error) *Status { - s, _ := FromError(err) - return s -} - -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. -func Code(err error) codes.Code { - // Don't use FromError to avoid allocation of OK status. - if err == nil { - return codes.OK - } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown -} - -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. -func FromContextError(err error) *Status { - switch err { - case nil: - return New(codes.OK, "") - case context.DeadlineExceeded: - return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) - } -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go deleted file mode 100644 index d06279a209..0000000000 --- a/vendor/google.golang.org/grpc/stream.go +++ /dev/null @@ -1,1486 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "errors" - "io" - "math" - "strconv" - "sync" - "time" - - "golang.org/x/net/trace" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/binarylog" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -// StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error - -// StreamDesc represents a streaming RPC service's method specification. -type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool -} - -// Stream defines the common interface a client or server stream has to satisfy. -// -// Deprecated: See ClientStream and ServerStream documentation instead. -type Stream interface { - // Deprecated: See ClientStream and ServerStream documentation instead. - Context() context.Context - // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error - // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error -} - -// ClientStream defines the client-side behavior of a streaming RPC. -// -// All errors returned from ClientStream methods are compatible with the -// status package. -type ClientStream interface { - // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. - Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server, if there is any. - // It must only be called after stream.CloseAndRecv has returned, or - // stream.Recv has returned a non-nil error (including io.EOF). - Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. It is also not safe to call CloseSend - // concurrently with SendMsg. - CloseSend() error - // Context returns the context for this stream. - // - // It should not be called until after Header or RecvMsg has returned. Once - // called, subsequent client-side retries are disabled. - Context() context.Context - // SendMsg is generally called by generated code. On error, SendMsg aborts - // the stream. If the error was generated by the client, the status is - // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. 
- // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the server. An - // untimely stream closure may result in lost messages. To ensure delivery, - // users should ensure the RPC completed successfully using RecvMsg. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. It is also - // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the stream completes successfully. On - // any other error, the stream is aborted and the error contains the RPC - // status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error -} - -// NewStream creates a new Stream for the client side. This is typically -// called by generated code. ctx is used for the lifetime of the stream. -// -// To ensure resources are not leaked due to the stream returned, one of the following -// actions must be performed: -// -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. -// -// If none of the above happen, a goroutine and a context will be leaked, and grpc -// will not call the optionally-configured stats handler with a stats.End message. -func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - // allow interceptor to see all applicable call options, which means those - // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) - - if cc.dopts.streamInt != nil { - return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) - } - return newClientStream(ctx, desc, cc, method, opts...) -} - -// NewClientStream is a wrapper for ClientConn.NewStream. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { - return cc.NewStream(ctx, desc, method, opts...) -} - -func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if channelz.IsOn() { - cc.incrCallsStarted() - defer func() { - if err != nil { - cc.incrCallsFailed() - } - }() - } - c := defaultCallInfo() - // Provide an opportunity for the first RPC to see the first service config - // provided by the resolver. 
- if err := cc.waitForResolvedAddrs(ctx); err != nil { - return nil, err - } - mc := cc.GetMethodConfig(method) - if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady - } - - // Possible context leak: - // The cancel function for the child context we create will only be called - // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if - // an error is generated by SendMsg. - // https://github.com/grpc/grpc-go/issues/1818. - var cancel context.CancelFunc - if mc.Timeout != nil && *mc.Timeout >= 0 { - ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer func() { - if err != nil { - cancel() - } - }() - - for _, o := range opts { - if err := o.before(c); err != nil { - return nil, toRPCErr(err) - } - } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if err := setCallInfoCodec(c); err != nil { - return nil, err - } - - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - ContentSubtype: c.contentSubtype, - } - - // Set our outgoing compression according to the UseCompressor CallOption, if - // set. In that case, also find the compressor from the encoding package. - // Otherwise, use the compressor configured by the WithCompressor DialOption, - // if set. - var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { - callHdr.SendCompress = ct - if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { - return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) - } - } - } else if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cp = cc.dopts.cp - } - if c.creds != nil { - callHdr.Creds = c.creds - } - var trInfo traceInfo - if EnableTracing { - trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - trInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } - - cs := &clientStream{ - callHdr: callHdr, - ctx: ctx, - methodConfig: &mc, - opts: opts, - callInfo: c, - cc: cc, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - cancel: cancel, - beginTime: beginTime, - firstAttempt: true, - } - if !cc.dopts.disableRetry { - cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) - } - cs.binlog = binarylog.GetMethodLogger(method) - - cs.callInfo.stream = cs - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. 
- if err := cs.newAttemptLocked(sh, trInfo); err != nil { - cs.finish(err) - return nil, err - } - - op := func(a *csAttempt) error { return a.newStream() } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) - return nil, err - } - - if cs.binlog != nil { - md, _ := metadata.FromOutgoingContext(ctx) - logEntry := &binarylog.ClientHeader{ - OnClientSide: true, - Header: md, - MethodName: method, - Authority: cs.cc.authority, - } - if deadline, ok := ctx.Deadline(); ok { - logEntry.Timeout = deadline.Sub(time.Now()) - if logEntry.Timeout < 0 { - logEntry.Timeout = 0 - } - } - cs.binlog.Log(logEntry) - } - - if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. - go func() { - select { - case <-cc.ctx.Done(): - cs.finish(ErrClientConnClosing) - case <-ctx.Done(): - cs.finish(toRPCErr(ctx.Err())) - } - }() - } - return cs, nil -} - -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error { - cs.attempt = &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) - } - t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) - if err != nil { - return err - } - cs.attempt.t = t - cs.attempt.done = done - return nil -} - -func (a *csAttempt) newStream() error { - cs := a.cs - cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) - if err != nil { - return toRPCErr(err) - } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} - return nil -} - -// clientStream implements a client side Stream. -type clientStream struct { - callHdr *transport.CallHdr - opts []CallOption - callInfo *callInfo - cc *ClientConn - desc *StreamDesc - - codec baseCodec - cp Compressor - comp encoding.Compressor - - cancel context.CancelFunc // cancels all attempts - - sentLast bool // sent an end stream - beginTime time.Time - - methodConfig *MethodConfig - - ctx context.Context // the application's context, wrapped by stats/tracing - - retryThrottler *retryThrottler // The throttler active when the RPC began. - - binlog *binarylog.MethodLogger // Binary logger, can be nil. - // serverHeaderBinlogged is a boolean for whether server header has been - // logged. Server header will be logged when the first time one of those - // happens: stream.Header(), stream.Recv(). - // - // It's only read and used by Recv() and Header(), so it doesn't need to be - // synchronized. - serverHeaderBinlogged bool - - mu sync.Mutex - firstAttempt bool // if true, transparent retry is valid - numRetries int // exclusive of transparent retry attempt(s) - numRetriesSincePushback int // retries since pushback; to reset backoff - finished bool // TODO: replace with atomic cmpxchg or sync.Once? - attempt *csAttempt // the active client stream attempt - // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer -} - -// csAttempt implements a single transport stream attempt within a -// clientStream. 
-type csAttempt struct { - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) - - finished bool - dc Decompressor - decomp encoding.Compressor - decompSet bool - - mu sync.Mutex // guards trInfo.tr - // trInfo.tr is set when created (if EnableTracing is true), - // and cleared when the finish method is called. - trInfo traceInfo - - statsHandler stats.Handler -} - -func (cs *clientStream) commitAttemptLocked() { - cs.committed = true - cs.buffer = nil -} - -func (cs *clientStream) commitAttempt() { - cs.mu.Lock() - cs.commitAttemptLocked() - cs.mu.Unlock() -} - -// shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err - } - // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - } - if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { - // First attempt, wait-for-ready, stream unprocessed: transparently retry. - cs.firstAttempt = false - return nil - } - cs.firstAttempt = false - if cs.cc.dopts.disableRetry { - return err - } - - pushback := 0 - hasPushback := false - if cs.attempt.s != nil { - if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { - return err - } - - // TODO(retry): Move down if the spec changes to not check server pushback - // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] - if len(sps) == 1 { - var e error - if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) - cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err - } - hasPushback = true - } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) - cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err - } - } - - var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() - } else { - code = status.Convert(err).Code() - } - - rp := cs.methodConfig.retryPolicy - if rp == nil || !rp.retryableStatusCodes[code] { - return err - } - - // Note: the ordering here is important; we count this as a failure - // only if the code matched a retryable code. - if cs.retryThrottler.throttle() { - return err - } - if cs.numRetries+1 >= rp.maxAttempts { - return err - } - - var dur time.Duration - if hasPushback { - dur = time.Millisecond * time.Duration(pushback) - cs.numRetriesSincePushback = 0 - } else { - fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.initialBackoff) * fact - if max := float64(rp.maxBackoff); cur > max { - cur = max - } - dur = time.Duration(grpcrand.Int63n(int64(cur))) - cs.numRetriesSincePushback++ - } - - // TODO(dfawley): we could eagerly fail here if dur puts us past the - // deadline, but unsure if it is worth doing. 
- t := time.NewTimer(dur) - select { - case <-t.C: - cs.numRetries++ - return nil - case <-cs.ctx.Done(): - t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() - } -} - -// Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { - for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { - cs.commitAttemptLocked() - return err - } - if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil { - return err - } - if lastErr = cs.replayBufferLocked(); lastErr == nil { - return nil - } - } -} - -func (cs *clientStream) Context() context.Context { - cs.commitAttempt() - // No need to lock before using attempt, since we know it is committed and - // cannot change. - return cs.attempt.s.Context() -} - -func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { - cs.mu.Lock() - for { - if cs.committed { - cs.mu.Unlock() - return op(cs.attempt) - } - a := cs.attempt - cs.mu.Unlock() - err := op(a) - cs.mu.Lock() - if a != cs.attempt { - // We started another attempt already. - continue - } - if err == io.EOF { - <-a.s.Done() - } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { - onSuccess() - cs.mu.Unlock() - return err - } - if err := cs.retryLocked(err); err != nil { - cs.mu.Unlock() - return err - } - } -} - -func (cs *clientStream) Header() (metadata.MD, error) { - var m metadata.MD - err := cs.withRetry(func(a *csAttempt) error { - var err error - m, err = a.s.Header() - return toRPCErr(err) - }, cs.commitAttemptLocked) - if err != nil { - cs.finish(err) - return nil, err - } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. - logEntry := &binarylog.ServerHeader{ - OnClientSide: true, - Header: m, - PeerAddr: nil, - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.binlog.Log(logEntry) - cs.serverHeaderBinlogged = true - } - return m, err -} - -func (cs *clientStream) Trailer() metadata.MD { - // On RPC failure, we never need to retry, because usage requires that - // RecvMsg() returned a non-nil error before calling this function is valid. - // We would have retried earlier if necessary. - // - // Commit the attempt anyway, just in case users are not following those - // directions -- it will prevent races and should not meaningfully impact - // performance. - cs.commitAttempt() - if cs.attempt.s == nil { - return nil - } - return cs.attempt.s.Trailer() -} - -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt - for _, f := range cs.buffer { - if err := f(a); err != nil { - return err - } - } - return nil -} - -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { - // Note: we still will buffer if retry is disabled (for transparent retries). - if cs.committed { - return - } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { - cs.commitAttemptLocked() - return - } - cs.buffer = append(cs.buffer, op) -} - -func (cs *clientStream) SendMsg(m interface{}) (err error) { - defer func() { - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. (Transport - // errors are converted to an io.EOF error in csAttempt.sendMsg; the real - // error will be returned from RecvMsg eventually in that case, or be - // retried.) 
- cs.finish(err) - } - }() - if cs.sentLast { - return status.Errorf(codes.Internal, "SendMsg called after CloseSend") - } - if !cs.desc.ClientStreams { - cs.sentLast = true - } - data, err := encode(cs.codec, m) - if err != nil { - return err - } - compData, err := compress(data, cs.cp, cs.comp) - if err != nil { - return err - } - hdr, payload := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) - } - msgBytes := data // Store the pointer before setting to nil. For binary logging. - op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. - m, data = nil, nil - return err - } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ - OnClientSide: true, - Message: msgBytes, - }) - } - return -} - -func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Call Header() to binary log header if it's not already logged. - cs.Header() - } - var recvInfo *payloadInfo - if cs.binlog != nil { - recvInfo = &payloadInfo{} - } - err := cs.withRetry(func(a *csAttempt) error { - return a.recvMsg(m, recvInfo) - }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ - OnClientSide: true, - Message: recvInfo.uncompressedBytes, - }) - } - if err != nil || !cs.desc.ServerStreams { - // err != nil or non-server-streaming indicates end of stream. - cs.finish(err) - - if cs.binlog != nil { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - cs.binlog.Log(logEntry) - } - } - return err -} - -func (cs *clientStream) CloseSend() error { - if cs.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? - return nil - } - cs.sentLast = true - op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) - // Always return nil; io.EOF is the only error that might make sense - // instead, but there is no need to signal the client to call RecvMsg - // as the only use left for the stream after CloseSend is to call - // RecvMsg. This also matches historical behavior. - return nil - } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ - OnClientSide: true, - }) - } - // We never returned an error here for reasons. - return nil -} - -func (cs *clientStream) finish(err error) { - if err == io.EOF { - // Ending a stream with EOF indicates a success. - err = nil - } - cs.mu.Lock() - if cs.finished { - cs.mu.Unlock() - return - } - cs.finished = true - cs.commitAttemptLocked() - cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. 
In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ - OnClientSide: true, - }) - } - if err == nil { - cs.retryThrottler.successfulRPC() - } - if channelz.IsOn() { - if err != nil { - cs.cc.incrCallsFailed() - } else { - cs.cc.incrCallsSucceeded() - } - } - if cs.attempt != nil { - cs.attempt.finish(err) - } - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } - } - cs.cancel() -} - -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { - cs := a.cs - if EnableTracing { - a.mu.Lock() - if a.trInfo.tr != nil { - a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } - a.mu.Unlock() - } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { - if !cs.desc.ClientStreams { - // For non-client-streaming RPCs, we return nil instead of EOF on error - // because the generated code requires it. finish is not called; RecvMsg() - // will call it with the stream's status independently. - return nil - } - return io.EOF - } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) - } - if channelz.IsOn() { - a.t.IncrMsgSent() - } - return nil -} - -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { - cs := a.cs - if a.statsHandler != nil && payInfo == nil { - payInfo = &payloadInfo{} - } - - if !a.decompSet { - // Block until we receive headers containing received message encoding. - if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if a.dc == nil || a.dc.Type() != ct { - // No configured decompressor, or it does not match the incoming - // message encoding; attempt to find a registered compressor that does. - a.dc = nil - a.decomp = encoding.GetCompressor(ct) - } - } else { - // No compression is used; disable our decompressor. - a.dc = nil - } - // Only initialize this state once per stream. - a.decompSet = true - } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { - if err == io.EOF { - if statusErr := a.s.Status().Err(); statusErr != nil { - return statusErr - } - return io.EOF // indicates successful end of stream. - } - return toRPCErr(err) - } - if EnableTracing { - a.mu.Lock() - if a.trInfo.tr != nil { - a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } - a.mu.Unlock() - } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), - }) - } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } - if cs.desc.ServerStreams { - // Subsequent messages should be received by subsequent RecvMsg calls. - return nil - } - // Special handling for non-server-stream rpcs. - // This recv expects EOF or errors, so we don't collect inPayload. 
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
- return a.s.Status().Err() // non-server streaming Recv returns nil on success
- }
- return toRPCErr(err)
-}
-
-func (a *csAttempt) finish(err error) {
- a.mu.Lock()
- if a.finished {
- a.mu.Unlock()
- return
- }
- a.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- if a.s != nil {
- a.t.CloseStream(a.s, err)
- }
-
- if a.done != nil {
- br := false
- var tr metadata.MD
- if a.s != nil {
- br = a.s.BytesReceived()
- tr = a.s.Trailer()
- }
- a.done(balancer.DoneInfo{
- Err: err,
- Trailer: tr,
- BytesSent: a.s != nil,
- BytesReceived: br,
- })
- }
- if a.statsHandler != nil {
- end := &stats.End{
- Client: true,
- BeginTime: a.cs.beginTime,
- EndTime: time.Now(),
- Error: err,
- }
- a.statsHandler.HandleRPC(a.cs.ctx, end)
- }
- if a.trInfo.tr != nil {
- if err == nil {
- a.trInfo.tr.LazyPrintf("RPC: [OK]")
- } else {
- a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- a.trInfo.tr.SetError()
- }
- a.trInfo.tr.Finish()
- a.trInfo.tr = nil
- }
- a.mu.Unlock()
-}
-
-func (ac *addrConn) newClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, opts ...CallOption) (_ ClientStream, err error) {
- ac.mu.Lock()
- if ac.transport != t {
- ac.mu.Unlock()
- return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
- }
- // transition to CONNECTING state when an attempt starts
- if ac.state != connectivity.Connecting {
- ac.updateConnectivityState(connectivity.Connecting)
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- }
- ac.mu.Unlock()
-
- if t == nil {
- // TODO: return RPC error here?
- return nil, errors.New("transport provided is nil")
- }
- // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
- c := &callInfo{}
-
- for _, o := range opts {
- if err := o.before(c); err != nil {
- return nil, toRPCErr(err)
- }
- }
- c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
-
- // Possible context leak:
- // The cancel function for the child context we create will only be called
- // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
- // an error is generated by SendMsg.
- // https://github.com/grpc/grpc-go/issues/1818.
- ctx, cancel := context.WithCancel(ctx)
- defer func() {
- if err != nil {
- cancel()
- }
- }()
-
- if err := setCallInfoCodec(c); err != nil {
- return nil, err
- }
-
- callHdr := &transport.CallHdr{
- Host: ac.cc.authority,
- Method: method,
- ContentSubtype: c.contentSubtype,
- }
-
- // Set our outgoing compression according to the UseCompressor CallOption, if
- // set. In that case, also find the compressor from the encoding package.
- // Otherwise, use the compressor configured by the WithCompressor DialOption,
- // if set.
- var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { - callHdr.SendCompress = ct - if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { - return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) - } - } - } else if ac.cc.dopts.cp != nil { - callHdr.SendCompress = ac.cc.dopts.cp.Type() - cp = ac.cc.dopts.cp - } - if c.creds != nil { - callHdr.Creds = c.creds - } - - as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, - } - - as.callInfo.stream = as - s, err := as.t.NewStream(as.ctx, as.callHdr) - if err != nil { - err = toRPCErr(err) - return nil, err - } - as.s = s - as.p = &parser{r: s} - ac.incrCallsStarted() - if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. - go func() { - select { - case <-ac.ctx.Done(): - as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) - case <-ctx.Done(): - as.finish(toRPCErr(ctx.Err())) - } - }() - } - return as, nil -} - -type addrConnStream struct { - s *transport.Stream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - done func(balancer.DoneInfo) - mu sync.Mutex - finished bool -} - -func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() - if err != nil { - as.finish(toRPCErr(err)) - } - return m, err -} - -func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() -} - -func (as *addrConnStream) CloseSend() error { - if as.sentLast { - // TODO: return an error and finish the stream instead, due to API misuse? - return nil - } - as.sentLast = true - - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) - // Always return nil; io.EOF is the only error that might make sense - // instead, but there is no need to signal the client to call RecvMsg - // as the only use left for the stream after CloseSend is to call - // RecvMsg. This also matches historical behavior. - return nil -} - -func (as *addrConnStream) Context() context.Context { - return as.s.Context() -} - -func (as *addrConnStream) SendMsg(m interface{}) (err error) { - defer func() { - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. (Transport - // errors are converted to an io.EOF error in csAttempt.sendMsg; the real - // error will be returned from RecvMsg eventually in that case, or be - // retried.) 
- as.finish(err)
- }
- }()
- if as.sentLast {
- return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
- }
- if !as.desc.ClientStreams {
- as.sentLast = true
- }
- data, err := encode(as.codec, m)
- if err != nil {
- return err
- }
- compData, err := compress(data, as.cp, as.comp)
- if err != nil {
- return err
- }
- hdr, payld := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
- }
-
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
- if !as.desc.ClientStreams {
- // For non-client-streaming RPCs, we return nil instead of EOF on error
- // because the generated code requires it. finish is not called; RecvMsg()
- // will call it with the stream's status independently.
- return nil
- }
- return io.EOF
- }
-
- if channelz.IsOn() {
- as.t.IncrMsgSent()
- }
- return nil
-}
-
-func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
- defer func() {
- if err != nil || !as.desc.ServerStreams {
- // err != nil or non-server-streaming indicates end of stream.
- as.finish(err)
- }
- }()
-
- if !as.decompSet {
- // Block until we receive headers containing received message encoding.
- if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if as.dc == nil || as.dc.Type() != ct {
- // No configured decompressor, or it does not match the incoming
- // message encoding; attempt to find a registered compressor that does.
- as.dc = nil
- as.decomp = encoding.GetCompressor(ct)
- }
- } else {
- // No compression is used; disable our decompressor.
- as.dc = nil
- }
- // Only initialize this state once per stream.
- as.decompSet = true
- }
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
- if err == io.EOF {
- if statusErr := as.s.Status().Err(); statusErr != nil {
- return statusErr
- }
- return io.EOF // indicates successful end of stream.
- }
- return toRPCErr(err)
- }
-
- if channelz.IsOn() {
- as.t.IncrMsgRecv()
- }
- if as.desc.ServerStreams {
- // Subsequent messages should be received by subsequent RecvMsg calls.
- return nil
- }
-
- // Special handling for non-server-stream rpcs.
- // This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
- }
- if err == io.EOF {
- return as.s.Status().Err() // non-server streaming Recv returns nil on success
- }
- return toRPCErr(err)
-}
-
-func (as *addrConnStream) finish(err error) {
- as.mu.Lock()
- if as.finished {
- as.mu.Unlock()
- return
- }
- as.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- if as.s != nil {
- as.t.CloseStream(as.s, err)
- }
-
- if err != nil {
- as.ac.incrCallsFailed()
- } else {
- as.ac.incrCallsSucceeded()
- }
- as.cancel()
- as.mu.Unlock()
-}
-
-// ServerStream defines the server-side behavior of a streaming RPC.
-//
-// All errors returned from ServerStream methods are compatible with the
-// status package.
-type ServerStream interface {
- // SetHeader sets the header metadata. It may be called multiple times.
- // When call multiple times, all the provided metadata will be merged.
- // All the metadata will be sent out when one of the following happens: - // - ServerStream.SendHeader() is called; - // - The first response is sent out; - // - An RPC status is sent out (error or success). - SetHeader(metadata.MD) error - // SendHeader sends the header metadata. - // The provided md and headers set by SetHeader() will be sent. - // It fails if called multiple times. - SendHeader(metadata.MD) error - // SetTrailer sets the trailer metadata which will be sent with the RPC status. - // When called more than once, all the provided metadata will be merged. - SetTrailer(metadata.MD) - // Context returns the context for this stream. - Context() context.Context - // SendMsg sends a message. On error, SendMsg aborts the stream and the - // error is returned directly. - // - // SendMsg blocks until: - // - There is sufficient flow control to schedule m with the transport, or - // - The stream is done, or - // - The stream breaks. - // - // SendMsg does not wait until the message is received by the client. An - // untimely stream closure may result in lost messages. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not safe - // to call SendMsg on the same stream in different goroutines. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message into m or the stream is - // done. It returns io.EOF when the client has performed a CloseSend. On - // any non-EOF error, the stream is aborted and the error contains the - // RPC status. - // - // It is safe to have a goroutine calling SendMsg and another goroutine - // calling RecvMsg on the same stream at the same time, but it is not - // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error -} - -// serverStream implements a server side Stream. -type serverStream struct { - ctx context.Context - t transport.ServerTransport - s *transport.Stream - p *parser - codec baseCodec - - cp Compressor - dc Decompressor - comp encoding.Compressor - decomp encoding.Compressor - - maxReceiveMessageSize int - maxSendMessageSize int - trInfo *traceInfo - - statsHandler stats.Handler - - binlog *binarylog.MethodLogger - // serverHeaderBinlogged indicates whether server header has been logged. It - // will happen when one of the following two happens: stream.SendHeader(), - // stream.Send(). - // - // It's only checked in send and sendHeader, doesn't need to be - // synchronized. - serverHeaderBinlogged bool - - mu sync.Mutex // protects trInfo.tr after the service handler runs. 
-} - -func (ss *serverStream) Context() context.Context { - return ss.ctx -} - -func (ss *serverStream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - return ss.s.SetHeader(md) -} - -func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { - h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - ss.serverHeaderBinlogged = true - } - return err -} - -func (ss *serverStream) SetTrailer(md metadata.MD) { - if md.Len() == 0 { - return - } - ss.s.SetTrailer(md) -} - -func (ss *serverStream) SendMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - if err != nil && err != io.EOF { - st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) - // Non-user specified status was sent out. This should be an error - // case (as a server side Cancel maybe). - // - // This is not handled specifically now. User will return a final - // status from the service handler, we will log that error instead. - // This behavior is similar to an interceptor. - } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } - }() - data, err := encode(ss.codec, m) - if err != nil { - return err - } - compData, err := compress(data, ss.cp, ss.comp) - if err != nil { - return err - } - hdr, payload := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) - } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { - return toRPCErr(err) - } - if ss.binlog != nil { - if !ss.serverHeaderBinlogged { - h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ - Header: h, - }) - ss.serverHeaderBinlogged = true - } - ss.binlog.Log(&binarylog.ServerMessage{ - Message: data, - }) - } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) - } - return nil -} - -func (ss *serverStream) RecvMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - if err != nil && err != io.EOF { - st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) - // Non-user specified status was sent out. This should be an error - // case (as a server side Cancel maybe). - // - // This is not handled specifically now. User will return a final - // status from the service handler, we will log that error instead. - // This behavior is similar to an interceptor. 
- } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } - }() - var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { - payInfo = &payloadInfo{} - } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { - if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) - } - return err - } - if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) - } - return toRPCErr(err) - } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), - }) - } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, - }) - } - return nil -} - -// MethodFromServerStream returns the method string for the input stream. -// The returned string is in the format of "/service/method". -func MethodFromServerStream(stream ServerStream) (string, bool) { - return Method(stream.Context()) -} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go deleted file mode 100644 index 584360f681..0000000000 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package tap defines the function handles which are executed on the transport -// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. -package tap - -import ( - "context" -) - -// Info defines the relevant information needed by the handles. -type Info struct { - // FullMethodName is the string of grpc method (in the format of - // /package.service/method). - FullMethodName string - // TODO: More to be added. -} - -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". -// -// It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. -// -// Note that it is executed in the per-connection I/O goroutine(s) instead of -// per-RPC goroutine. Therefore, users should NOT have any -// blocking/time-consuming work in this handle. Otherwise all the RPCs would -// slow down. Also, for the same reason, this handle won't be called -// concurrently by gRPC. 
-type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go deleted file mode 100644 index c1c96dedcb..0000000000 --- a/vendor/google.golang.org/grpc/trace.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "bytes" - "fmt" - "io" - "net" - "strings" - "time" - - "golang.org/x/net/trace" -) - -// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. -// This should only be set before any RPCs are sent or received by this program. -var EnableTracing bool - -// methodFamily returns the trace family for the given method. -// It turns "/pkg.Service/GetFoo" into "pkg.Service". -func methodFamily(m string) string { - m = strings.TrimPrefix(m, "/") // remove leading slash - if i := strings.Index(m, "/"); i >= 0 { - m = m[:i] // remove everything from second slash - } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } - return m -} - -// traceInfo contains tracing information for an RPC. -type traceInfo struct { - tr trace.Trace - firstLine firstLine -} - -// firstLine is the first line of an RPC trace. -type firstLine struct { - client bool // whether this is a client (outgoing) RPC - remoteAddr net.Addr - deadline time.Duration // may be zero -} - -func (f *firstLine) String() string { - var line bytes.Buffer - io.WriteString(&line, "RPC: ") - if f.client { - io.WriteString(&line, "to") - } else { - io.WriteString(&line, "from") - } - fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) - if f.deadline != 0 { - fmt.Fprint(&line, f.deadline) - } else { - io.WriteString(&line, "none") - } - return line.String() -} - -const truncateSize = 100 - -func truncate(x string, l int) string { - if l > len(x) { - return x - } - return x[:l] -} - -// payload represents an RPC request or response payload. -type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message - // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? -} - -func (p payload) String() string { - if p.sent { - return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) - } - return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) -} - -type fmtStringer struct { - format string - a []interface{} -} - -func (f *fmtStringer) String() string { - return fmt.Sprintf(f.format, f.a...) -} - -type stringer string - -func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go deleted file mode 100644 index 45eace590f..0000000000 --- a/vendor/google.golang.org/grpc/version.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -// Version is the current grpc version. -const Version = "1.18.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 94a50640b5..0000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash - -if [[ `uname -a` = *"Darwin"* ]]; then - echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" - exit 1 -fi - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -die() { - echo "$@" >&2 - exit 1 -} - -# Check to make sure it's safe to modify the user's git repo. -if git status --porcelain | read; then - die "Uncommitted or untracked files found; commit changes first" -fi - -if [[ -d "${GOPATH}/src" ]]; then - die "\${GOPATH}/src (${GOPATH}/src) exists; this script will delete it." -fi - -# Undo any edits made by this script. -cleanup() { - rm -rf "${GOPATH}/src" - git reset --hard HEAD -} -trap cleanup EXIT - -fail_on_output() { - tee /dev/stderr | (! read) -} - -PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" - -if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go - fi - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif ! which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Ensure all source files contain a copyright message. -git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -(! grep 'func Test[^(]' *_test.go) -(! grep 'func Test[^(]' test/*.go) - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') - -# - Ensure all ptypes proto packages are renamed when importing. -git ls-files "*.go" | (! 
xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") - -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | fail_on_output -golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go tool vet -all . - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Check that our module is tidy. -if go help mod >& /dev/null; then - go mod tidy && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Collection of static analysis checks -### HACK HACK HACK: Remove once staticcheck works with modules. -# Make a symlink in ${GOPATH}/src to its ${GOPATH}/pkg/mod equivalent for every package we use. -for x in $(find "${GOPATH}/pkg/mod" -name '*@*' | grep -v \/mod\/cache\/); do - pkg="$(echo ${x#"${GOPATH}/pkg/mod/"} | cut -f1 -d@)"; - # If multiple versions exist, just use the existing one. - if [[ -L "${GOPATH}/src/${pkg}" ]]; then continue; fi - mkdir -p "$(dirname "${GOPATH}/src/${pkg}")"; - ln -s $x "${GOPATH}/src/${pkg}"; -done -### END HACK HACK HACK - -# TODO(menghanl): fix errors in transport_test. -staticcheck -go 1.9 -ignore ' -balancer.go:SA1019 -balancer_test.go:SA1019 -clientconn_test.go:SA1019 -balancer/roundrobin/roundrobin_test.go:SA1019 -benchmark/benchmain/main.go:SA1019 -internal/transport/handler_server.go:SA1019 -internal/transport/handler_server_test.go:SA1019 -internal/transport/transport_test.go:SA2002 -stats/stats_test.go:SA1019 -test/channelz_test.go:SA1019 -test/end2end_test.go:SA1019 -test/healthcheck_test.go:SA1019 -' ./... -misspell -error . diff --git a/vendor/gopkg.in/fsnotify.v1/.editorconfig b/vendor/gopkg.in/fsnotify.v1/.editorconfig new file mode 100644 index 0000000000..ba49e3c234 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/.editorconfig @@ -0,0 +1,5 @@ +root = true + +[*] +indent_style = tab +indent_size = 4 diff --git a/vendor/gopkg.in/fsnotify.v1/.gitignore b/vendor/gopkg.in/fsnotify.v1/.gitignore new file mode 100644 index 0000000000..4cd0cbaf43 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/vendor/gopkg.in/fsnotify.v1/.travis.yml b/vendor/gopkg.in/fsnotify.v1/.travis.yml new file mode 100644 index 0000000000..981d1bb813 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/.travis.yml @@ -0,0 +1,30 @@ +sudo: false +language: go + +go: + - 1.8.x + - 1.9.x + - tip + +matrix: + allow_failures: + - go: tip + fast_finish: true + +before_script: + - go get -u github.com/golang/lint/golint + +script: + - go test -v --race ./... + +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - go vet ./... 
+ +os: + - linux + - osx + +notifications: + email: false diff --git a/vendor/gopkg.in/fsnotify.v1/AUTHORS b/vendor/gopkg.in/fsnotify.v1/AUTHORS new file mode 100644 index 0000000000..5ab5d41c54 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md new file mode 100644 index 0000000000..be4d7ea2c1 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks 
@suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. + +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. 
+* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. + +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: 
deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md new file mode 100644 index 0000000000..828a60b24b --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. 
+ +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. 
+ +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/gopkg.in/fsnotify.v1/LICENSE similarity index 92% rename from vendor/golang.org/x/sync/LICENSE rename to vendor/gopkg.in/fsnotify.v1/LICENSE index 6a66aea5ea..f21e540800 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/gopkg.in/fsnotify.v1/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/gopkg.in/fsnotify.v1/README.md b/vendor/gopkg.in/fsnotify.v1/README.md new file mode 100644 index 0000000000..3993207413 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/README.md @@ -0,0 +1,79 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +|Adapter |OS |Status | +|----------|----------|----------| +|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| +|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| +|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| +|fanotify |Linux 2.6.37+ | | +|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| +|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. 
+ +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). + +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/vendor/gopkg.in/fsnotify.v1/fen.go b/vendor/gopkg.in/fsnotify.v1/fen.go new file mode 100644 index 0000000000..ced39cb881 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/gopkg.in/fsnotify.v1/fsnotify.go b/vendor/gopkg.in/fsnotify.v1/fsnotify.go new file mode 100644 index 0000000000..190bf0de57 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. 
+package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/gopkg.in/fsnotify.v1/inotify.go b/vendor/gopkg.in/fsnotify.v1/inotify.go new file mode 100644 index 0000000000..d9fd1b88a0 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. 
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/gopkg.in/fsnotify.v1/inotify_poller.go b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go new file mode 100644 index 0000000000..cc7db4b22e --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/gopkg.in/fsnotify.v1/kqueue.go b/vendor/gopkg.in/fsnotify.v1/kqueue.go new file mode 100644 index 0000000000..86e76a3d67 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go new file mode 100644 index 0000000000..7d8de14513 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go new file mode 100644 index 0000000000..9139e17161 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/vendor/gopkg.in/fsnotify.v1/windows.go b/vendor/gopkg.in/fsnotify.v1/windows.go new file mode 100644 index 0000000000..09436f31d8 --- /dev/null +++ b/vendor/gopkg.in/fsnotify.v1/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/gopkg.in/tomb.v1/LICENSE b/vendor/gopkg.in/tomb.v1/LICENSE new file mode 100644 index 0000000000..a4249bb31d --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/LICENSE @@ -0,0 +1,29 @@ +tomb - support for clean goroutine termination in Go. + +Copyright (c) 2010-2011 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/tomb.v1/README.md b/vendor/gopkg.in/tomb.v1/README.md new file mode 100644 index 0000000000..3ae8788e81 --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/README.md @@ -0,0 +1,4 @@ +Installation and usage +---------------------- + +See [gopkg.in/tomb.v1](https://gopkg.in/tomb.v1) for documentation and usage details. 
diff --git a/vendor/gopkg.in/tomb.v1/tomb.go b/vendor/gopkg.in/tomb.v1/tomb.go new file mode 100644 index 0000000000..9aec56d821 --- /dev/null +++ b/vendor/gopkg.in/tomb.v1/tomb.go @@ -0,0 +1,176 @@ +// Copyright (c) 2011 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The tomb package offers a conventional API for clean goroutine termination. +// +// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, +// and the reason for its death. +// +// The zero value of a Tomb assumes that a goroutine is about to be +// created or already alive. Once Kill or Killf is called with an +// argument that informs the reason for death, the goroutine is in +// a dying state and is expected to terminate soon. Right before the +// goroutine function or method returns, Done must be called to inform +// that the goroutine is indeed dead and about to stop running. +// +// A Tomb exposes Dying and Dead channels. These channels are closed +// when the Tomb state changes in the respective way. They enable +// explicit blocking until the state changes, and also to selectively +// unblock select statements accordingly. +// +// When the tomb state changes to dying and there's still logic going +// on within the goroutine, nested functions and methods may choose to +// return ErrDying as their error value, as this error won't alter the +// tomb state if provided to the Kill method. This is a convenient way to +// follow standard Go practices in the context of a dying tomb. +// +// For background and a detailed example, see the following blog post: +// +// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control +// +// For a more complex code snippet demonstrating the use of multiple +// goroutines with a single Tomb, see: +// +// http://play.golang.org/p/Xh7qWsDPZP +// +package tomb + +import ( + "errors" + "fmt" + "sync" +) + +// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead, +// and the reason for its death. +// +// See the package documentation for details. 
+type Tomb struct { + m sync.Mutex + dying chan struct{} + dead chan struct{} + reason error +} + +var ( + ErrStillAlive = errors.New("tomb: still alive") + ErrDying = errors.New("tomb: dying") +) + +func (t *Tomb) init() { + t.m.Lock() + if t.dead == nil { + t.dead = make(chan struct{}) + t.dying = make(chan struct{}) + t.reason = ErrStillAlive + } + t.m.Unlock() +} + +// Dead returns the channel that can be used to wait +// until t.Done has been called. +func (t *Tomb) Dead() <-chan struct{} { + t.init() + return t.dead +} + +// Dying returns the channel that can be used to wait +// until t.Kill or t.Done has been called. +func (t *Tomb) Dying() <-chan struct{} { + t.init() + return t.dying +} + +// Wait blocks until the goroutine is in a dead state and returns the +// reason for its death. +func (t *Tomb) Wait() error { + t.init() + <-t.dead + t.m.Lock() + reason := t.reason + t.m.Unlock() + return reason +} + +// Done flags the goroutine as dead, and should be called a single time +// right before the goroutine function or method returns. +// If the goroutine was not already in a dying state before Done is +// called, it will be flagged as dying and dead at once with no +// error. +func (t *Tomb) Done() { + t.Kill(nil) + close(t.dead) +} + +// Kill flags the goroutine as dying for the given reason. +// Kill may be called multiple times, but only the first +// non-nil error is recorded as the reason for termination. +// +// If reason is ErrDying, the previous reason isn't replaced +// even if it is nil. It's a runtime error to call Kill with +// ErrDying if t is not in a dying state. +func (t *Tomb) Kill(reason error) { + t.init() + t.m.Lock() + defer t.m.Unlock() + if reason == ErrDying { + if t.reason == ErrStillAlive { + panic("tomb: Kill with ErrDying while still alive") + } + return + } + if t.reason == nil || t.reason == ErrStillAlive { + t.reason = reason + } + // If the receive on t.dying succeeds, then + // it can only be because we have already closed it. + // If it blocks, then we know that it needs to be closed. + select { + case <-t.dying: + default: + close(t.dying) + } +} + +// Killf works like Kill, but builds the reason providing the received +// arguments to fmt.Errorf. The generated error is also returned. +func (t *Tomb) Killf(f string, a ...interface{}) error { + err := fmt.Errorf(f, a...) + t.Kill(err) + return err +} + +// Err returns the reason for the goroutine death provided via Kill +// or Killf, or ErrStillAlive when the goroutine is still alive. +func (t *Tomb) Err() (reason error) { + t.init() + t.m.Lock() + reason = t.reason + t.m.Unlock() + return +} diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go index f5135f0f30..92f7c19d26 100644 --- a/vendor/k8s.io/api/admission/v1beta1/doc.go +++ b/vendor/k8s.io/api/admission/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=false // +groupName=admission.k8s.io diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto deleted file mode 100644 index 98e9a571a2..0000000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.admissionregistration.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -message Initializer { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required - optional string name = 1; - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - repeated Rule rules = 2; -} - -// InitializerConfiguration describes the configuration of initializers. -message InitializerConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +patchMergeKey=name - // +patchStrategy=merge - // +optional - repeated Initializer initializers = 2; -} - -// InitializerConfigurationList is a list of InitializerConfiguration. -message InitializerConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of InitializerConfiguration. - repeated InitializerConfiguration items = 2; -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -message Rule { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiGroups = 1; - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiVersions = 2; - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. 
- // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - repeated string resources = 3; -} - diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go deleted file mode 100644 index a245f1e858..0000000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfiguration describes the configuration of initializers. -type InitializerConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +patchMergeKey=name - // +patchStrategy=merge - // +optional - Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfigurationList is a list of InitializerConfiguration. -type InitializerConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of InitializerConfiguration. - Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -type Initializer struct { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. 
- // Required - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"` -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 69e4b7c64a..0000000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Initializer = map[string]string{ - "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", - "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. 
Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", - "rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_InitializerConfiguration = map[string]string{ - "": "InitializerConfiguration describes the configuration of initializers.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", -} - -func (InitializerConfiguration) SwaggerDoc() map[string]string { - return map_InitializerConfiguration -} - -var map_InitializerConfigurationList = map[string]string{ - "": "InitializerConfigurationList is a list of InitializerConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of InitializerConfiguration.", -} - -func (InitializerConfigurationList) SwaggerDoc() map[string]string { - return map_InitializerConfigurationList -} - -var map_Rule = map[string]string{ - "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", - "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", - "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", - "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", -} - -func (Rule) SwaggerDoc() map[string]string { - return map_Rule -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go index 2b29efaca4..0a40726fae 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -15,11 +15,12 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. 
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and validatingWebhookConfiguration is for the +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the // new dynamic admission controller configuration. package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index 2ca3fa6524..1114e29a16 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -249,6 +249,12 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.Scope != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i += copy(dAtA[i:], *m.Scope) + } return i, nil } @@ -462,6 +468,26 @@ func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) i += copy(dAtA[i:], *m.SideEffects) } + if m.TimeoutSeconds != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -563,6 +589,10 @@ func (m *Rule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -647,6 +677,15 @@ func (m *Webhook) Size() (n int) { l = len(*m.SideEffects) n += 1 + l + sovGenerated(uint64(l)) } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -711,6 +750,7 @@ func (this *Rule) String() string { `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, `}`, }, "") return s @@ -771,6 +811,8 @@ func (this *Webhook) String() string { `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, `}`, }, "") return s @@ -1133,6 +1175,36 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1835,6 +1907,55 @@ func (m *Webhook) Unmarshal(dAtA []byte) error { s := SideEffectClass(dAtA[iNdEx:postIndex]) m.SideEffects = &s iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2110,62 +2231,67 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 906 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80, - 0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a, - 0xbb, 0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2, - 0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67, - 0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7, - 0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45, - 0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1, - 0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd, - 0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e, - 0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a, - 0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90, - 0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce, - 0x26, 0x0a, 0x10, 0x96, 0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2, - 0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf, - 0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18, - 0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad, - 0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30, - 0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 
0x49, 0x6d, 0x7c, 0x16, - 0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf, - 0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48, - 0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b, - 0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd, - 0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5, - 0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa, - 0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b, - 0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a, - 0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e, - 0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 0xbb, - 0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11, - 0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb, - 0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97, - 0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd, - 0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d, - 0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc, - 0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45, - 0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee, - 0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 0x8f, 0x86, 0xba, 0xd7, 0x3b, - 0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4, - 0x2f, 0x15, 0x54, 0xcd, 0xaf, 0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32, - 0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e, - 0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f, - 0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35, - 0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8, - 0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f, - 0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39, - 0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33, - 0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d, - 0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2, - 0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9, - 0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1, - 0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e, - 0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36, - 0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e, - 0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d, - 
0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00, + // 989 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0xaf, 0x37, 0x09, 0x49, 0x26, 0xed, 0xee, 0x76, 0xf8, 0xb3, 0xa1, 0xac, 0xec, 0x28, 0x07, + 0x14, 0x09, 0xd6, 0xa6, 0x05, 0x21, 0xb4, 0x02, 0xa1, 0xba, 0xb0, 0x50, 0xa9, 0xbb, 0x5b, 0x26, + 0xfb, 0x47, 0x42, 0x1c, 0x98, 0x38, 0x2f, 0xc9, 0x10, 0xc7, 0x63, 0x79, 0xc6, 0x29, 0xbd, 0x21, + 0xf1, 0x05, 0xf8, 0x16, 0xf0, 0x25, 0x38, 0x70, 0xeb, 0x71, 0x2f, 0x88, 0x3d, 0x59, 0xd4, 0x9c, + 0x39, 0x70, 0xed, 0x09, 0x8d, 0xed, 0xd8, 0x49, 0xd3, 0x76, 0xb3, 0x17, 0x0e, 0xdc, 0x3c, 0xbf, + 0xf7, 0x7e, 0xef, 0xbd, 0xdf, 0xcc, 0x7b, 0xcf, 0xe8, 0xab, 0xf1, 0x47, 0xc2, 0x64, 0xdc, 0x1a, + 0x87, 0x3d, 0x08, 0x3c, 0x90, 0x20, 0xac, 0x29, 0x78, 0x7d, 0x1e, 0x58, 0x99, 0x81, 0xfa, 0xcc, + 0xa2, 0xfd, 0x09, 0x13, 0x82, 0x71, 0x2f, 0x80, 0x21, 0x13, 0x32, 0xa0, 0x92, 0x71, 0xcf, 0x9a, + 0x6e, 0xf7, 0x40, 0xd2, 0x6d, 0x6b, 0x08, 0x1e, 0x04, 0x54, 0x42, 0xdf, 0xf4, 0x03, 0x2e, 0x39, + 0xee, 0xa4, 0x4c, 0x93, 0xfa, 0xcc, 0xbc, 0x90, 0x69, 0x66, 0xcc, 0xad, 0x3b, 0x43, 0x26, 0x47, + 0x61, 0xcf, 0x74, 0xf8, 0xc4, 0x1a, 0xf2, 0x21, 0xb7, 0x92, 0x00, 0xbd, 0x70, 0x90, 0x9c, 0x92, + 0x43, 0xf2, 0x95, 0x06, 0xde, 0xfa, 0xa0, 0x28, 0x69, 0x42, 0x9d, 0x11, 0xf3, 0x20, 0x38, 0xb6, + 0xfc, 0xf1, 0x50, 0x01, 0xc2, 0x9a, 0x80, 0xa4, 0xd6, 0x74, 0xa9, 0x9c, 0x2d, 0xeb, 0x32, 0x56, + 0x10, 0x7a, 0x92, 0x4d, 0x60, 0x89, 0xf0, 0xe1, 0x8b, 0x08, 0xc2, 0x19, 0xc1, 0x84, 0x9e, 0xe7, + 0xb5, 0x7f, 0xd7, 0xd0, 0xed, 0xfb, 0xa1, 0xa4, 0x92, 0x79, 0xc3, 0xa7, 0xd0, 0x1b, 0x71, 0x3e, + 0xde, 0xe3, 0xde, 0x80, 0x0d, 0xc3, 0x54, 0x36, 0xfe, 0x16, 0xd5, 0x54, 0x91, 0x7d, 0x2a, 0x69, + 0x53, 0x6b, 0x69, 0x9d, 0xc6, 0xce, 0x7b, 0x66, 0x71, 0x57, 0x79, 0x2e, 0xd3, 0x1f, 0x0f, 0x15, + 0x20, 0x4c, 0xe5, 0x6d, 0x4e, 0xb7, 0xcd, 0x87, 0xbd, 0xef, 0xc0, 0x91, 0xf7, 0x41, 0x52, 0x1b, + 0x9f, 0x44, 0xc6, 0x5a, 0x1c, 0x19, 0xa8, 0xc0, 0x48, 0x1e, 0x15, 0x77, 0x51, 0x2d, 0xcb, 0x2c, + 0x9a, 0xd7, 0x5a, 0xa5, 0x4e, 0x63, 0x67, 0xdb, 0x5c, 0xf5, 0x35, 0xcc, 0x8c, 0x69, 0x97, 0x55, + 0x0a, 0x52, 0x3b, 0xca, 0x02, 0xb5, 0xff, 0xd6, 0x50, 0xeb, 0x2a, 0x5d, 0x07, 0x4c, 0x48, 0xfc, + 0xcd, 0x92, 0x36, 0x73, 0x35, 0x6d, 0x8a, 0x9d, 0x28, 0xbb, 0x99, 0x29, 0xab, 0xcd, 0x90, 0x39, + 0x5d, 0x63, 0x54, 0x61, 0x12, 0x26, 0x33, 0x51, 0xf7, 0x56, 0x17, 0x75, 0x55, 0xe1, 0xf6, 0x46, + 0x96, 0xb2, 0xb2, 0xaf, 0x82, 0x93, 0x34, 0x47, 0xfb, 0x37, 0x0d, 0x95, 0x49, 0xe8, 0x02, 0x7e, + 0x07, 0xd5, 0xa9, 0xcf, 0xbe, 0x08, 0x78, 0xe8, 0x8b, 0xa6, 0xd6, 0x2a, 0x75, 0xea, 0xf6, 0x46, + 0x1c, 0x19, 0xf5, 0xdd, 0xc3, 0xfd, 0x14, 0x24, 0x85, 0x1d, 0x6f, 0xa3, 0x06, 0xf5, 0xd9, 0x13, + 0x08, 0x54, 0x29, 0x69, 0xa1, 0x75, 0xfb, 0x46, 0x1c, 0x19, 0x8d, 0xdd, 0xc3, 0xfd, 0x19, 0x4c, + 0xe6, 0x7d, 0x54, 0xfc, 0x00, 0x04, 0x0f, 0x03, 0x07, 0x44, 0xb3, 0x54, 0xc4, 0x27, 0x33, 0x90, + 0x14, 0x76, 0xfc, 0x2e, 0xaa, 0x08, 0x87, 0xfb, 0xd0, 0x2c, 0xb7, 0xb4, 0x4e, 0xdd, 0x7e, 0x43, + 0x95, 0xdd, 0x55, 0xc0, 0x59, 0x64, 0xd4, 0x93, 0x8f, 0x47, 0xc7, 0x3e, 0x90, 0xd4, 0xa9, 0xfd, + 0xb3, 0x86, 0xb0, 0xd2, 0xf0, 0x94, 0xc9, 0xd1, 0x43, 0x1f, 0x52, 0xbd, 0x02, 0x7f, 0x8a, 0x10, + 0xcf, 0x4f, 0x99, 0x24, 0x23, 0xe9, 0xa6, 0x1c, 0x3d, 0x8b, 0x8c, 0x8d, 0xfc, 0x94, 0x84, 0x9c, + 0xa3, 0xe0, 0x43, 0x54, 0x0e, 0x42, 0x17, 0x9a, 0xd7, 0x96, 0x9e, 0xf8, 0x05, 0xef, 0xa0, 0x8a, + 0xb1, 0xd7, 
0xb3, 0xfb, 0x4e, 0xae, 0x97, 0x24, 0x91, 0xda, 0x3f, 0x6a, 0xe8, 0x66, 0x17, 0x82, + 0x29, 0x73, 0x80, 0xc0, 0x00, 0x02, 0xf0, 0x1c, 0xc0, 0x16, 0xaa, 0x7b, 0x74, 0x02, 0xc2, 0xa7, + 0x0e, 0x24, 0xed, 0x54, 0xb7, 0x37, 0x33, 0x6e, 0xfd, 0xc1, 0xcc, 0x40, 0x0a, 0x1f, 0xdc, 0x42, + 0x65, 0x75, 0x48, 0xea, 0xaa, 0x17, 0x79, 0x94, 0x2f, 0x49, 0x2c, 0xf8, 0x36, 0x2a, 0xfb, 0x54, + 0x8e, 0x9a, 0xa5, 0xc4, 0xa3, 0xa6, 0xac, 0x87, 0x54, 0x8e, 0x48, 0x82, 0xb6, 0xff, 0xd0, 0x90, + 0xfe, 0x84, 0xba, 0xac, 0xff, 0xbf, 0x9b, 0xde, 0x7f, 0x34, 0xd4, 0xbe, 0x5a, 0xd9, 0x7f, 0x30, + 0xbf, 0x93, 0xc5, 0xf9, 0xfd, 0x72, 0x75, 0x59, 0x57, 0x97, 0x7e, 0xc9, 0x04, 0xff, 0x52, 0x41, + 0xd5, 0xcc, 0x3d, 0xef, 0x0c, 0xed, 0xd2, 0xce, 0x38, 0x42, 0xeb, 0x8e, 0xcb, 0xc0, 0x93, 0x69, + 0xe8, 0xac, 0xb7, 0x3f, 0x79, 0xe9, 0xab, 0xdf, 0x9b, 0x0b, 0x62, 0xbf, 0x96, 0x25, 0x5a, 0x9f, + 0x47, 0xc9, 0x42, 0x22, 0x4c, 0x51, 0x45, 0x8d, 0x40, 0x3a, 0xfb, 0x8d, 0x9d, 0x8f, 0x5f, 0x6e, + 0x9a, 0x16, 0x47, 0xbb, 0xb8, 0x09, 0x65, 0x13, 0x24, 0x8d, 0x8c, 0x0f, 0xd0, 0xc6, 0x80, 0x32, + 0x37, 0x0c, 0xe0, 0x90, 0xbb, 0xcc, 0x39, 0xce, 0xb6, 0xc7, 0xdb, 0x71, 0x64, 0x6c, 0xdc, 0x9b, + 0x37, 0x9c, 0x45, 0xc6, 0xe6, 0x02, 0x90, 0x8c, 0xfe, 0x22, 0x19, 0x7f, 0x8f, 0x36, 0xf3, 0x91, + 0xeb, 0x82, 0x0b, 0x8e, 0xe4, 0x41, 0xb3, 0x92, 0x5c, 0xd7, 0xfb, 0x2b, 0x76, 0x0b, 0xed, 0x81, + 0x3b, 0xa3, 0xda, 0xaf, 0xc7, 0x91, 0xb1, 0xf9, 0xe0, 0x7c, 0x44, 0xb2, 0x9c, 0x04, 0x7f, 0x86, + 0x1a, 0x82, 0xf5, 0xe1, 0xf3, 0xc1, 0x00, 0x1c, 0x29, 0x9a, 0xaf, 0x24, 0x2a, 0xda, 0x6a, 0xbb, + 0x76, 0x0b, 0xf8, 0x2c, 0x32, 0x6e, 0x14, 0xc7, 0x3d, 0x97, 0x0a, 0x41, 0xe6, 0x69, 0xf8, 0x2e, + 0xba, 0xae, 0x7e, 0xe0, 0x3c, 0x94, 0x5d, 0x70, 0xb8, 0xd7, 0x17, 0xcd, 0x6a, 0x4b, 0xeb, 0x54, + 0x6c, 0x1c, 0x47, 0xc6, 0xf5, 0x47, 0x0b, 0x16, 0x72, 0xce, 0x13, 0x3f, 0x46, 0xb7, 0xf2, 0x37, + 0x21, 0x30, 0x65, 0x70, 0x94, 0xef, 0xfa, 0x5a, 0xb2, 0x47, 0xdf, 0x8a, 0x23, 0xe3, 0xd6, 0xee, + 0xc5, 0x2e, 0xe4, 0x32, 0x6e, 0xfb, 0x57, 0x0d, 0xbd, 0x7a, 0x41, 0xff, 0x60, 0x8a, 0xaa, 0x22, + 0xdd, 0x8a, 0xd9, 0x38, 0xde, 0x5d, 0xbd, 0x3b, 0xce, 0xaf, 0x53, 0xbb, 0x11, 0x47, 0x46, 0x75, + 0x86, 0xce, 0xe2, 0xe2, 0x0e, 0xaa, 0x39, 0xd4, 0x0e, 0xbd, 0x7e, 0xb6, 0xcf, 0xd7, 0xed, 0x75, + 0x35, 0xbe, 0x7b, 0xbb, 0x29, 0x46, 0x72, 0x2b, 0x7e, 0x13, 0x95, 0xc2, 0xc0, 0xcd, 0x56, 0x67, + 0x35, 0x8e, 0x8c, 0xd2, 0x63, 0x72, 0x40, 0x14, 0x66, 0xdf, 0x39, 0x39, 0xd5, 0xd7, 0x9e, 0x9d, + 0xea, 0x6b, 0xcf, 0x4f, 0xf5, 0xb5, 0x1f, 0x62, 0x5d, 0x3b, 0x89, 0x75, 0xed, 0x59, 0xac, 0x6b, + 0xcf, 0x63, 0x5d, 0xfb, 0x33, 0xd6, 0xb5, 0x9f, 0xfe, 0xd2, 0xd7, 0xbe, 0xae, 0x66, 0xa5, 0xfd, + 0x1b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc0, 0x7c, 0xc4, 0x6f, 0x0a, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 1c40ae530d..a5fa0f30d6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -81,6 +81,18 @@ message Rule { // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. 
+ // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; } // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make @@ -217,6 +229,25 @@ message Webhook { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; } // WebhookClientConfig contains the information to make a TLS diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 49d94ec0eb..f7025f6080 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -49,8 +49,32 @@ type Rule struct { // Depending on the enclosing object, subresources might not be allowed. // Required. Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` } +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. + AllScopes ScopeType = "*" +) + type FailurePolicyType string const ( @@ -216,6 +240,25 @@ type Webhook struct { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. 
+ // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` } // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index e97628aab7..f400f9f425 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -52,6 +52,7 @@ var map_Rule = map[string]string{ "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", } func (Rule) SwaggerDoc() map[string]string { @@ -99,13 +100,15 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { } var map_Webhook = map[string]string{ - "": "Webhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. 
However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", } func (Webhook) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c6867be122..ab79ee6aa6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -109,6 +109,11 @@ func (in *Rule) DeepCopyInto(out *Rule) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } return } @@ -257,6 +262,16 @@ func (in *Webhook) DeepCopyInto(out *Webhook) { *out = new(SideEffectClass) **out = **in } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 1d66c22232..61dc97bde5 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index 68ac55bf17..2fe8574581 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -69,7 +69,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement = "Parallel" + ParallelPodManagement PodManagementPolicyType = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 6047ed501d..9072bab692 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index f87f39fe94..7942b997b6 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -263,7 +263,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 326902fd0f..cf6039df69 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -111,7 +111,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement = "Parallel" + ParallelPodManagement PodManagementPolicyType = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -433,7 +433,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. 
// +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 68ebef3487..da1eb5996e 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -149,7 +149,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index e93e164e10..9f499869fd 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 5d11cbe8d8..17e43970fa 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta2"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -69,12 +69,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -82,7 +82,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -111,7 +111,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -374,12 +374,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -387,7 +387,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -416,7 +416,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -527,7 +527,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -543,15 +543,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index e75589adc5..39e07e278a 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta2 import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -62,15 +62,15 @@ type ScaleStatus struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -115,7 +115,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement = "Parallel" + ParallelPodManagement PodManagementPolicyType = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -413,7 +413,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -666,12 +666,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -679,7 +679,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -697,7 +697,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -716,12 +716,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -729,7 +729,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -740,7 +740,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -850,7 +850,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -867,7 +867,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index f8229ceda8..822158a186 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. 
However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -272,7 +272,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -290,9 +290,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go index c0d184a998..ae8f767149 100644 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=auditregistration.k8s.io diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 193f154abe..1614265bdf 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index b69636a814..db7be173dd 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -84,7 +84,10 @@ message TokenRequestSpec { optional int64 expirationSeconds = 4; // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound objet exists. + // The token will only be valid for as long as the bound object exists. + // NOTE: The API server's TokenReview endpoint will validate the + // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds + // small if you want prompt revocation. 
// +optional optional BoundObjectReference boundObjectRef = 3; } diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index d348c6fd40..c48b03691e 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -155,7 +155,10 @@ type TokenRequestSpec struct { ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound objet exists. + // The token will only be valid for as long as the bound object exists. + // NOTE: The API server's TokenReview endpoint will validate the + // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds + // small if you want prompt revocation. // +optional BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` } diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index f2c9b95c71..09f6b920fd 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ var map_TokenRequestSpec = map[string]string{ "": "TokenRequestSpec contains client provided parameters of a token request.", "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", - "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound objet exists.", + "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", } func (TokenRequestSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index 919f3c42fd..185a2240f6 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index c63ac28cfa..cf100e6b75 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
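The TokenRequestSpec comments corrected above describe bound service-account tokens: the token stays valid only while the referenced object exists, and a short expiration narrows the revocation window. A minimal Go sketch of such a request, assuming only the vendored k8s.io/api/authentication/v1 types (the audience URL and pod name are hypothetical):

package main

import (
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	// Token bound to a specific Pod with a 10 minute lifetime, so it
	// lapses quickly even if deletion of the Pod is not noticed promptly.
	expiration := int64(600)
	tr := authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         []string{"https://kubernetes.default.svc"}, // hypothetical audience
			ExpirationSeconds: &expiration,
			BoundObjectRef: &authenticationv1.BoundObjectReference{
				Kind:       "Pod",
				APIVersion: "v1",
				Name:       "example-pod", // hypothetical pod name
			},
		},
	}
	fmt.Printf("%+v\n", tr)
}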
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 324f293a17..7046f11109 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 9c3be845f0..8c9c09b5cb 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index da9789e5cb..2cc9f11eaf 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go index 7c7d2b6f1e..6d275f6d95 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index 04491807f2..c4a8db6e78 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 43020ed05c..258ff028c1 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index f4ed01ad84..3044b0c629 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index 8473b640fa..9055248b9d 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go new file mode 100644 index 0000000000..fc2f4f2c6e --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=coordination.k8s.io + +package v1 // import "k8s.io/api/coordination/v1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go similarity index 51% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go rename to vendor/k8s.io/api/coordination/v1/generated.pb.go index 74c467a222..349c68574a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -15,26 +15,27 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto /* - Package v1alpha1 is a generated protocol buffer package. + Package v1 is a generated protocol buffer package. It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto + k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto It has these top-level messages: - Initializer - InitializerConfiguration - InitializerConfigurationList - Rule + Lease + LeaseList + LeaseSpec */ -package v1alpha1 +package v1 import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -51,67 +52,24 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } -func (*InitializerConfiguration) ProtoMessage() {} -func (*InitializerConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } -func (*InitializerConfigurationList) ProtoMessage() {} -func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { - proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") - proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") - proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") -} -func (m *Initializer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil + proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") + proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") + proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") } - -func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { +func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -121,7 +79,7 @@ func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -134,22 +92,18 @@ func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { return 0, err } i += n1 - if len(m.Initializers) > 0 { - for _, msg := range m.Initializers { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err 
:= msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n2 return i, nil } -func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -159,7 +113,7 @@ func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { +func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -167,11 +121,11 @@ func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -187,7 +141,7 @@ func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *Rule) Marshal() (dAtA []byte, err error) { +func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -197,55 +151,46 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { +func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if m.HolderIdentity != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i += copy(dAtA[i:], *m.HolderIdentity) + } + if m.LeaseDurationSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + } + if m.AcquireTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) + n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i += n4 + } + if m.RenewTime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) + n5, err := m.RenewTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n5 } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.LeaseTransitions != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) } return i, nil } @@ -259,35 +204,17 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() 
- n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *InitializerConfiguration) Size() (n int) { +func (m *Lease) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Initializers) > 0 { - for _, e := range m.Initializers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *InitializerConfigurationList) Size() (n int) { +func (m *LeaseList) Size() (n int) { var l int _ = l l = m.ListMeta.Size() @@ -301,26 +228,26 @@ func (m *InitializerConfigurationList) Size() (n int) { return n } -func (m *Rule) Size() (n int) { +func (m *LeaseSpec) Size() (n int) { var l int _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.HolderIdentity != nil { + l = len(*m.HolderIdentity) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.LeaseDurationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.AcquireTime != nil { + l = m.AcquireTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseTransitions != nil { + n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) } return n } @@ -338,47 +265,38 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfiguration) String() string { +func (this *Lease) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&InitializerConfiguration{`, + s := strings.Join([]string{`&Lease{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *InitializerConfigurationList) String() string { +func (this *LeaseList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&InitializerConfigurationList{`, + s := strings.Join([]string{`&LeaseList{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *Rule) String() string { +func (this *LeaseSpec) String() string { if this == nil { return "nil" } - s := 
strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + s := strings.Join([]string{`&LeaseSpec{`, + `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, + `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") return s @@ -391,7 +309,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *Initializer) Unmarshal(dAtA []byte) error { +func (m *Lease) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -414,17 +332,17 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + return fmt.Errorf("proto: Lease: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -434,24 +352,25 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -475,8 +394,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, Rule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -501,7 +419,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } return nil } -func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { +func (m *LeaseList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -524,15 +442,15 @@ func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitializerConfiguration: wiretype end group for non-group") + 
return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitializerConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -556,13 +474,13 @@ func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -586,8 +504,8 @@ func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Initializers = append(m.Initializers, Initializer{}) - if err := m.Initializers[len(m.Initializers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, Lease{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -612,7 +530,7 @@ func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { +func (m *LeaseSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -635,17 +553,17 @@ func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitializerConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitializerConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -655,27 +573,27 @@ func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.HolderIdentity = &s iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) } - var msglen int + var v int32 for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -685,78 +603,17 @@ func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, InitializerConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.LeaseDurationSeconds = &v + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -766,26 +623,30 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + if m.AcquireTime == nil { + m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + } + if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -795,26 +656,30 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + if m.RenewTime == nil { + m.RenewTime = 
&k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) } - var stringLen uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -824,21 +689,12 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.LeaseTransitions = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -966,43 +822,43 @@ var ( ) func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptorGenerated) } var fileDescriptorGenerated = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x31, - 0x18, 0x6e, 0x6c, 0x0b, 0x6d, 0xda, 0x45, 0x19, 0x3c, 0x94, 0x22, 0xd3, 0xd2, 0x53, 0x45, 0x4c, - 0xec, 0x22, 0x8b, 0xd7, 0x9d, 0x3d, 0x48, 0xc1, 0x8f, 0x25, 0x88, 0x07, 0xf1, 0x60, 0xda, 0xbe, - 0x3b, 0x8d, 0xed, 0x4c, 0x86, 0x24, 0x53, 0xd0, 0x93, 0x17, 0xef, 0x82, 0x7f, 0xaa, 0xc7, 0x3d, - 0xee, 0xa9, 0xd8, 0x11, 0x3c, 0xfa, 0x1b, 0x24, 0x33, 0x9d, 0x9d, 0x59, 0xeb, 0xe2, 0xea, 0x2d, - 0xef, 0xf3, 0xe6, 0xf9, 0x4a, 0x30, 0x5b, 0x3c, 0xd1, 0x44, 0x48, 0xba, 0x88, 0x27, 0xa0, 0x42, - 0x30, 0xa0, 0xe9, 0x0a, 0xc2, 0x99, 0x54, 0x74, 0xb7, 0xe0, 0x91, 0xa0, 0x7c, 0x16, 0x08, 0xad, - 0x85, 0x0c, 0x15, 0xf8, 0x42, 0x1b, 0xc5, 0x8d, 0x90, 0x21, 0x5d, 0x8d, 0xf8, 0x32, 0x9a, 0xf3, - 0x11, 0xf5, 0x21, 0x04, 0xc5, 0x0d, 0xcc, 0x48, 0xa4, 0xa4, 0x91, 0xce, 0xfd, 0x8c, 0x4a, 0x78, - 0x24, 0xc8, 0x1f, 0xa9, 0x24, 0xa7, 0x76, 0x1f, 0xfa, 0xc2, 0xcc, 0xe3, 0x09, 0x99, 0xca, 0x80, - 0xfa, 0xd2, 0x97, 0x34, 0x55, 0x98, 0xc4, 0x67, 0xe9, 0x94, 0x0e, 0xe9, 0x29, 0x53, 0xee, 0x3e, - 0x2e, 0x42, 0x05, 0x7c, 0x3a, 0x17, 0x21, 0xa8, 0x0f, 0x34, 0x5a, 0xf8, 0x16, 0xd0, 0x34, 0x00, - 0xc3, 0xe9, 0x6a, 0x2f, 0x4f, 0x97, 0x5e, 0xc7, 0x52, 0x71, 0x68, 0x44, 0x00, 0x7b, 0x84, 0xa3, - 0xbf, 0x11, 0xf4, 0x74, 0x0e, 0x01, 0xff, 0x9d, 0x37, 0xf8, 0x8c, 0x70, 0x6b, 0x1c, 0x0a, 0x23, - 0xf8, 0x52, 0x7c, 0x04, 0xe5, 0xf4, 0x71, 0x2d, 0xe4, 0x01, 0x74, 0x50, 0x1f, 0x0d, 0x9b, 0x5e, - 0x7b, 0xbd, 0xe9, 0x55, 0x92, 0x4d, 0xaf, 0xf6, 0x82, 0x07, 0xc0, 0xd2, 0x8d, 0xf3, 0x0a, 0xd7, - 0x55, 0xbc, 0x04, 0xdd, 0xb9, 0xd5, 0xaf, 0x0e, 0x5b, 0x87, 0x94, 0xdc, 0xf8, 0xe9, 0x08, 0x8b, - 0x97, 0xe0, 0x1d, 0xec, 0x34, 0xeb, 0x76, 0xd2, 0x2c, 0x13, 0x1b, 0xfc, 0x44, 0xb8, 0x53, 0xca, - 0x71, 0x22, 0xc3, 0x33, 0xe1, 0xc7, 0x99, 0x80, 0xf3, 0x0e, 0x37, 0xec, 0x43, 0xcd, 0xb8, 0xe1, - 0x69, 0xb0, 0xd6, 0xe1, 0xa3, 0x92, 0xeb, 0x65, 0x5f, 0x12, 0x2d, 0x7c, 0x0b, 0x68, 0x62, 0x6f, - 0x93, 0xd5, 0x88, 0xbc, 0x9c, 0xbc, 0x87, 0xa9, 0x79, 0x0e, 0x86, 0x7b, 0xce, 0xce, 0x16, 0x17, - 0x18, 0xbb, 0x54, 0x75, 0x22, 0xdc, 
0x16, 0x85, 0x7b, 0xde, 0xed, 0xe8, 0x1f, 0xba, 0x95, 0xc2, - 0x7b, 0x77, 0x77, 0x5e, 0xed, 0x12, 0xa8, 0xd9, 0x15, 0x87, 0xc1, 0x0f, 0x84, 0xef, 0x5d, 0x57, - 0xf8, 0x99, 0xd0, 0xc6, 0x79, 0xbb, 0x57, 0x9a, 0xdc, 0xac, 0xb4, 0x65, 0xa7, 0x95, 0xef, 0xec, - 0x62, 0x34, 0x72, 0xa4, 0x54, 0x78, 0x8e, 0xeb, 0xc2, 0x40, 0x90, 0x37, 0x3d, 0xf9, 0xbf, 0xa6, - 0x57, 0x52, 0x17, 0x3f, 0x3b, 0xb6, 0xca, 0x2c, 0x33, 0x18, 0x7c, 0x45, 0xb8, 0x66, 0xbf, 0xda, - 0x79, 0x80, 0x9b, 0x3c, 0x12, 0x4f, 0x95, 0x8c, 0x23, 0xdd, 0x41, 0xfd, 0xea, 0xb0, 0xe9, 0x1d, - 0x24, 0x9b, 0x5e, 0xf3, 0xf8, 0x74, 0x9c, 0x81, 0xac, 0xd8, 0x3b, 0x23, 0xdc, 0xe2, 0x91, 0x78, - 0x0d, 0xca, 0xe6, 0xc8, 0x52, 0x36, 0xbd, 0xdb, 0xc9, 0xa6, 0xd7, 0x3a, 0x3e, 0x1d, 0xe7, 0x30, - 0x2b, 0xdf, 0xb1, 0xfa, 0x0a, 0xb4, 0x8c, 0xd5, 0x14, 0x74, 0xa7, 0x5a, 0xe8, 0xb3, 0x1c, 0x64, - 0xc5, 0xde, 0x23, 0xeb, 0xad, 0x5b, 0x39, 0xdf, 0xba, 0x95, 0x8b, 0xad, 0x5b, 0xf9, 0x94, 0xb8, - 0x68, 0x9d, 0xb8, 0xe8, 0x3c, 0x71, 0xd1, 0x45, 0xe2, 0xa2, 0x6f, 0x89, 0x8b, 0xbe, 0x7c, 0x77, - 0x2b, 0x6f, 0x1a, 0x79, 0xe9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x06, 0xa3, 0xcb, 0x75, - 0x04, 0x00, 0x00, + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, + 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, + 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, + 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, + 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, + 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, + 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, + 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, + 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, + 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, + 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, + 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, + 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, + 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, + 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, + 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, + 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, + 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, + 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, + 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, + 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, + 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 
0xaf, + 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, + 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, + 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, + 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, + 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, + 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, + 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, + 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, + 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, + 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto new file mode 100644 index 0000000000..99692e958d --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.coordination.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Lease defines a lease concept. +message Lease { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional LeaseSpec spec = 2; +} + +// LeaseList is a list of Lease objects. +message LeaseList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Lease items = 2; +} + +// LeaseSpec is a specification of a Lease. +message LeaseSpec { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + optional string holderIdentity = 1; + + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + optional int32 leaseDurationSeconds = 2; + + // acquireTime is a time when the current lease was acquired. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + optional int32 leaseTransitions = 5; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/coordination/v1/register.go similarity index 86% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/register.go rename to vendor/k8s.io/api/coordination/v1/register.go index e9a4164c37..95b987b98b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/vendor/k8s.io/api/coordination/v1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,10 +22,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -const GroupName = "admissionregistration.k8s.io" +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -40,12 +41,13 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to scheme. +// Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &InitializerConfiguration{}, - &InitializerConfigurationList{}, + &Lease{}, + &LeaseList{}, ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go new file mode 100644 index 0000000000..8f9f24d04e --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Lease defines a lease concept. 
+type Lease struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseSpec is a specification of a Lease. +type LeaseSpec struct { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` + // acquireTime is a time when the current lease was acquired. + // +optional + AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LeaseList is a list of Lease objects. +type LeaseList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..bd02ad5daa --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Lease = map[string]string{ + "": "Lease defines a lease concept.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Lease) SwaggerDoc() map[string]string { + return map_Lease +} + +var map_LeaseList = map[string]string{ + "": "LeaseList is a list of Lease objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (LeaseList) SwaggerDoc() map[string]string { + return map_LeaseList +} + +var map_LeaseSpec = map[string]string{ + "": "LeaseSpec is a specification of a Lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "acquireTime": "acquireTime is a time when the current lease was acquired.", + "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", + "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", +} + +func (LeaseSpec) SwaggerDoc() map[string]string { + return map_LeaseSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0f534055f6 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lease) DeepCopyInto(out *Lease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. +func (in *Lease) DeepCopy() *Lease { + if in == nil { + return nil + } + out := new(Lease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Lease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LeaseList) DeepCopyInto(out *LeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Lease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. +func (in *LeaseList) DeepCopy() *LeaseList { + if in == nil { + return nil + } + out := new(LeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { + *out = *in + if in.HolderIdentity != nil { + in, out := &in.HolderIdentity, &out.HolderIdentity + *out = new(string) + **out = **in + } + if in.LeaseDurationSeconds != nil { + in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds + *out = new(int32) + **out = **in + } + if in.AcquireTime != nil { + in, out := &in.AcquireTime, &out.AcquireTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.LeaseTransitions != nil { + in, out := &in.LeaseTransitions, &out.LeaseTransitions + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. +func (in *LeaseSpec) DeepCopy() *LeaseSpec { + if in == nil { + return nil + } + out := new(LeaseSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go index bc95fd17d4..304732d59b 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=coordination.k8s.io diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index 2c72ec2df2..edc9b4d600 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -97,4 +97,10 @@ const ( // This annotation will be used to compute the in-cluster network programming latency SLI, see // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. + // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" ) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 96994c6245..1bdf0b25b1 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -16,6 +16,7 @@ limitations under the License. 
// +k8s:openapi-gen=true // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 05cc6d6284..058e03eb94 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -33,6 +33,7 @@ limitations under the License. AzureFileVolumeSource Binding CSIPersistentVolumeSource + CSIVolumeSource Capabilities CephFSPersistentVolumeSource CephFSVolumeSource @@ -293,816 +294,820 @@ func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } +func (*CSIVolumeSource) ProtoMessage() {} +func (*CSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{11} } func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } func (*CinderPersistentVolumeSource) ProtoMessage() {} func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} + return fileDescriptorGenerated, []int{13} } func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{17} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{22} + return fileDescriptorGenerated, []int{23} } func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func 
(*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} + return fileDescriptorGenerated, []int{31} } func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{36} + return fileDescriptorGenerated, []int{37} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func 
(*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{52} + return fileDescriptorGenerated, []int{53} } func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) 
ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{55} + return fileDescriptorGenerated, []int{56} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{57} + return fileDescriptorGenerated, []int{58} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{64} + return fileDescriptorGenerated, []int{65} } func (m 
*ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } 
func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeList) Reset() { *m = 
NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{93} + return fileDescriptorGenerated, []int{94} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaim) Reset() { *m = 
PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{102} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{104} + return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} + return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{108} + return fileDescriptorGenerated, []int{109} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{110} + return fileDescriptorGenerated, []int{111} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{111} + return fileDescriptorGenerated, []int{112} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*Pod) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{113} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} -func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} -func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func 
(*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} -func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{136} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{137} + return fileDescriptorGenerated, []int{138} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{141} + return fileDescriptorGenerated, []int{142} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{145} + return fileDescriptorGenerated, []int{146} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{146} + return fileDescriptorGenerated, []int{147} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{147} + return fileDescriptorGenerated, []int{148} } func (m *ReplicationControllerStatus) Reset() { *m = 
ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{148} + return fileDescriptorGenerated, []int{149} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{156} + return fileDescriptorGenerated, []int{157} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} -func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func 
(*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{159} + return fileDescriptorGenerated, []int{160} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) 
ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{172} + return fileDescriptorGenerated, []int{173} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{179} + return fileDescriptorGenerated, []int{180} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} 
} func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{185} + return fileDescriptorGenerated, []int{186} } func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{186} } +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{187} + return fileDescriptorGenerated, []int{188} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{193} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{194} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{194} + return fileDescriptorGenerated, []int{195} } func (m 
*WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{195} + return fileDescriptorGenerated, []int{196} } func init() { @@ -1115,6 +1120,7 @@ func init() { proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") @@ -1696,6 +1702,76 @@ func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + if m.ReadOnly != nil { + dAtA[i] = 0x10 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.FSType != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i += copy(dAtA[i:], *m.FSType) + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for _, k := range keysForVolumeAttributes { + dAtA[i] = 0x22 + i++ + v := m.VolumeAttributes[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.NodePublishSecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePublishSecretRef.Size())) + n9, err := m.NodePublishSecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1790,11 +1866,11 @@ func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n9, err := m.SecretRef.MarshalTo(dAtA[i:]) + n10, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } dAtA[i] = 0x30 i++ @@ -1853,11 +1929,11 @@ func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n10, err := m.SecretRef.MarshalTo(dAtA[i:]) + n11, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } dAtA[i] = 
0x30 i++ @@ -1905,11 +1981,11 @@ func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n11, err := m.SecretRef.MarshalTo(dAtA[i:]) + n12, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -1949,11 +2025,11 @@ func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n12, err := m.SecretRef.MarshalTo(dAtA[i:]) + n13, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } return i, nil } @@ -2033,11 +2109,11 @@ func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n14, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 if len(m.Conditions) > 0 { for _, msg := range m.Conditions { dAtA[i] = 0x12 @@ -2071,11 +2147,11 @@ func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n14, err := m.ListMeta.MarshalTo(dAtA[i:]) + n15, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n15 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2109,11 +2185,11 @@ func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n15, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n16, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n16 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -2185,11 +2261,11 @@ func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n16, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -2221,11 +2297,11 @@ func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n17, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n18, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -2261,11 +2337,11 @@ func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + n19, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2337,11 +2413,11 @@ func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n19, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n20 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2385,11 +2461,11 @@ func (m *ConfigMapVolumeSource) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n20, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n21, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n21 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2504,11 +2580,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n21, err := m.Resources.MarshalTo(dAtA[i:]) + n22, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { dAtA[i] = 0x4a @@ -2525,31 +2601,31 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LivenessProbe.Size())) - n22, err := m.LivenessProbe.MarshalTo(dAtA[i:]) + n23, err := m.LivenessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } if m.ReadinessProbe != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ReadinessProbe.Size())) - n23, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) + n24, err := m.ReadinessProbe.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 } if m.Lifecycle != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Lifecycle.Size())) - n24, err := m.Lifecycle.MarshalTo(dAtA[i:]) + n25, err := m.Lifecycle.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 } dAtA[i] = 0x6a i++ @@ -2563,11 +2639,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n25, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n26, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 } dAtA[i] = 0x80 i++ @@ -2727,31 +2803,31 @@ func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Waiting.Size())) - n26, err := m.Waiting.MarshalTo(dAtA[i:]) + n27, err := m.Waiting.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 } if m.Running != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Running.Size())) - n27, err := m.Running.MarshalTo(dAtA[i:]) + n28, err := m.Running.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } if m.Terminated != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Terminated.Size())) - n28, err := m.Terminated.MarshalTo(dAtA[i:]) + n29, err := m.Terminated.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 } return i, nil } @@ -2774,11 +2850,11 @@ func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n29, err := m.StartedAt.MarshalTo(dAtA[i:]) + n30, err := m.StartedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 return i, nil } @@ -2814,19 +2890,19 @@ func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) - n30, err := m.StartedAt.MarshalTo(dAtA[i:]) + n31, err := m.StartedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) - 
n31, err := m.FinishedAt.MarshalTo(dAtA[i:]) + n32, err := m.FinishedAt.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) @@ -2882,19 +2958,19 @@ func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) - n32, err := m.State.MarshalTo(dAtA[i:]) + n33, err := m.State.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) - n33, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + n34, err := m.LastTerminationState.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 dAtA[i] = 0x20 i++ if m.Ready { @@ -2995,21 +3071,21 @@ func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n34, err := m.FieldRef.MarshalTo(dAtA[i:]) + n35, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 } if m.ResourceFieldRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n35, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + n36, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 } if m.Mode != nil { dAtA[i] = 0x20 @@ -3077,11 +3153,11 @@ func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n36, err := m.SizeLimit.MarshalTo(dAtA[i:]) + n37, err := m.SizeLimit.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 } return i, nil } @@ -3109,11 +3185,11 @@ func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n37, err := m.TargetRef.MarshalTo(dAtA[i:]) + n38, err := m.TargetRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 } dAtA[i] = 0x1a i++ @@ -3229,11 +3305,11 @@ func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n38, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n39, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if len(m.Subsets) > 0 { for _, msg := range m.Subsets { dAtA[i] = 0x12 @@ -3267,11 +3343,11 @@ func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n39, err := m.ListMeta.MarshalTo(dAtA[i:]) + n40, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3310,21 +3386,21 @@ func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n40, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n41, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 } if m.SecretRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n41, err := m.SecretRef.MarshalTo(dAtA[i:]) + n42, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n42 } return i, nil } @@ -3356,11 +3432,11 @@ func (m *EnvVar) MarshalTo(dAtA 
[]byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n42, err := m.ValueFrom.MarshalTo(dAtA[i:]) + n43, err := m.ValueFrom.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n43 } return i, nil } @@ -3384,41 +3460,41 @@ func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n43, err := m.FieldRef.MarshalTo(dAtA[i:]) + n44, err := m.FieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n44 } if m.ResourceFieldRef != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n44, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + n45, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n45 } if m.ConfigMapKeyRef != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n45, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) + n46, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n46 } if m.SecretKeyRef != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n46, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) + n47, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n47 } return i, nil } @@ -3441,19 +3517,19 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n47, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n48 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n48, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + n49, err := m.InvolvedObject.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n49 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -3465,27 +3541,27 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n49, err := m.Source.MarshalTo(dAtA[i:]) + n50, err := m.Source.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n50 dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n50, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + n51, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n51 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n51, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + n52, err := m.LastTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n52 dAtA[i] = 0x40 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) @@ -3496,20 +3572,20 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n52, err := m.EventTime.MarshalTo(dAtA[i:]) + n53, err := m.EventTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n53 if m.Series != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n53, err := m.Series.MarshalTo(dAtA[i:]) + n54, err := m.Series.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n54 } dAtA[i] = 0x62 i++ @@ -3519,11 +3595,11 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { dAtA[i] 
= 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n54, err := m.Related.MarshalTo(dAtA[i:]) + n55, err := m.Related.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n55 } dAtA[i] = 0x72 i++ @@ -3554,11 +3630,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n55, err := m.ListMeta.MarshalTo(dAtA[i:]) + n56, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n56 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3595,11 +3671,11 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n56, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + n57, err := m.LastObservedTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n57 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) @@ -3758,11 +3834,11 @@ func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n57, err := m.SecretRef.MarshalTo(dAtA[i:]) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n58 } dAtA[i] = 0x20 i++ @@ -3824,11 +3900,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + n59, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n59 } dAtA[i] = 0x20 i++ @@ -4052,11 +4128,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n59, err := m.Port.MarshalTo(dAtA[i:]) + n60, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n60 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -4125,31 +4201,31 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n60, err := m.Exec.MarshalTo(dAtA[i:]) + n61, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n61 } if m.HTTPGet != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n61, err := m.HTTPGet.MarshalTo(dAtA[i:]) + n62, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n62 } if m.TCPSocket != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n62, err := m.TCPSocket.MarshalTo(dAtA[i:]) + n63, err := m.TCPSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n63 } return i, nil } @@ -4288,11 +4364,11 @@ func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n63, err := m.SecretRef.MarshalTo(dAtA[i:]) + n64, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n64 } dAtA[i] = 0x58 i++ @@ -4380,11 +4456,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n64, err := m.SecretRef.MarshalTo(dAtA[i:]) + n65, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n65 
} dAtA[i] = 0x58 i++ @@ -4453,21 +4529,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n65, err := m.PostStart.MarshalTo(dAtA[i:]) + n66, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n66 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n66, err := m.PreStop.MarshalTo(dAtA[i:]) + n67, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n67 } return i, nil } @@ -4490,19 +4566,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n67, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n68, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n68 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n68, err := m.Spec.MarshalTo(dAtA[i:]) + n69, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n69 return i, nil } @@ -4549,11 +4625,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n69, err := (&v).MarshalTo(dAtA[i:]) + n70, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n70 } } if len(m.Min) > 0 { @@ -4580,11 +4656,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n70, err := (&v).MarshalTo(dAtA[i:]) + n71, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n71 } } if len(m.Default) > 0 { @@ -4611,11 +4687,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) + n72, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n72 } } if len(m.DefaultRequest) > 0 { @@ -4642,11 +4718,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) + n73, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n73 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -4673,11 +4749,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) + n74, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n74 } } return i, nil @@ -4701,11 +4777,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(dAtA[i:]) + n75, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n75 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4769,11 +4845,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(dAtA[i:]) + n76, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n76 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4947,27 +5023,27 @@ func (m *Namespace) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n77, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n77 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n77, err := m.Spec.MarshalTo(dAtA[i:]) + n78, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n78 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n78, err := m.Status.MarshalTo(dAtA[i:]) + n79, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n79 return i, nil } @@ -4989,11 +5065,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n79, err := m.ListMeta.MarshalTo(dAtA[i:]) + n80, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n80 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5082,27 +5158,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n80, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n81, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n81 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n81, err := m.Spec.MarshalTo(dAtA[i:]) + n82, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n82 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n82, err := m.Status.MarshalTo(dAtA[i:]) + n83, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n83 return i, nil } @@ -5151,11 +5227,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n83, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n84, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n84 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -5198,19 +5274,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n84, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + n85, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n84 + i += n85 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n85, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n86, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n86 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -5241,11 +5317,11 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n86, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n87, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += n87 } return i, nil } @@ -5269,31 +5345,31 @@ func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, 
i, uint64(m.Assigned.Size())) - n87, err := m.Assigned.MarshalTo(dAtA[i:]) + n88, err := m.Assigned.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n88 } if m.Active != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n88, err := m.Active.MarshalTo(dAtA[i:]) + n89, err := m.Active.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n88 + i += n89 } if m.LastKnownGood != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n89, err := m.LastKnownGood.MarshalTo(dAtA[i:]) + n90, err := m.LastKnownGood.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n89 + i += n90 } dAtA[i] = 0x22 i++ @@ -5320,11 +5396,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n90, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n91, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n91 return i, nil } @@ -5346,11 +5422,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n91, err := m.ListMeta.MarshalTo(dAtA[i:]) + n92, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n92 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5427,11 +5503,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n92, err := (&v).MarshalTo(dAtA[i:]) + n93, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n93 } } return i, nil @@ -5601,11 +5677,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n93, err := m.ConfigSource.MarshalTo(dAtA[i:]) + n94, err := m.ConfigSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n93 + i += n94 } return i, nil } @@ -5649,11 +5725,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n94, err := (&v).MarshalTo(dAtA[i:]) + n95, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n94 + i += n95 } } if len(m.Allocatable) > 0 { @@ -5680,11 +5756,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n95, err := (&v).MarshalTo(dAtA[i:]) + n96, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n95 + i += n96 } } dAtA[i] = 0x1a @@ -5718,19 +5794,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n96, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + n97, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n96 + i += n97 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n97, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n98, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n97 + i += n98 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x42 @@ -5774,11 +5850,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) - n98, err := m.Config.MarshalTo(dAtA[i:]) + n99, err := 
m.Config.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n98 + i += n99 } return i, nil } @@ -5931,27 +6007,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n99, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n100, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n99 + i += n100 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n100, err := m.Spec.MarshalTo(dAtA[i:]) + n101, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n100 + i += n101 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n101, err := m.Status.MarshalTo(dAtA[i:]) + n102, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n101 + i += n102 return i, nil } @@ -5973,27 +6049,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n102, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n103, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n102 + i += n103 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n103, err := m.Spec.MarshalTo(dAtA[i:]) + n104, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n103 + i += n104 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n104, err := m.Status.MarshalTo(dAtA[i:]) + n105, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n104 + i += n105 return i, nil } @@ -6023,19 +6099,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n105, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n106, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n105 + i += n106 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n106, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n107, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n106 + i += n107 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -6065,11 +6141,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n107, err := m.ListMeta.MarshalTo(dAtA[i:]) + n108, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n107 + i += n108 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6118,11 +6194,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n108, err := m.Resources.MarshalTo(dAtA[i:]) + n109, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n108 + i += n109 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) @@ -6131,11 +6207,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n109, err := m.Selector.MarshalTo(dAtA[i:]) + n110, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n109 + i += n110 } if m.StorageClassName != nil { dAtA[i] = 0x2a @@ -6153,11 +6229,11 @@ func 
(m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) - n110, err := m.DataSource.MarshalTo(dAtA[i:]) + n111, err := m.DataSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n110 + i += n111 } return i, nil } @@ -6220,11 +6296,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n111, err := (&v).MarshalTo(dAtA[i:]) + n112, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n111 + i += n112 } } if len(m.Conditions) > 0 { @@ -6290,11 +6366,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n112, err := m.ListMeta.MarshalTo(dAtA[i:]) + n113, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n112 + i += n113 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6329,151 +6405,151 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n113, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + n114, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n113 + i += n114 } if m.AWSElasticBlockStore != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n114, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n115, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n114 + i += n115 } if m.HostPath != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n115, err := m.HostPath.MarshalTo(dAtA[i:]) + n116, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n115 + i += n116 } if m.Glusterfs != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n116, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n117, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n116 + i += n117 } if m.NFS != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n117, err := m.NFS.MarshalTo(dAtA[i:]) + n118, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n117 + i += n118 } if m.RBD != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n118, err := m.RBD.MarshalTo(dAtA[i:]) + n119, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n118 + i += n119 } if m.ISCSI != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n119, err := m.ISCSI.MarshalTo(dAtA[i:]) + n120, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n119 + i += n120 } if m.Cinder != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n120, err := m.Cinder.MarshalTo(dAtA[i:]) + n121, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n120 + i += n121 } if m.CephFS != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n121, err := m.CephFS.MarshalTo(dAtA[i:]) + n122, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n122 } if m.FC != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n122, err := m.FC.MarshalTo(dAtA[i:]) 
+ n123, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n123 } if m.Flocker != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n123, err := m.Flocker.MarshalTo(dAtA[i:]) + n124, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n124 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n124, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n125, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n125 } if m.AzureFile != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n125, err := m.AzureFile.MarshalTo(dAtA[i:]) + n126, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n126 } if m.VsphereVolume != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n126, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n127, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n127 } if m.Quobyte != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n127, err := m.Quobyte.MarshalTo(dAtA[i:]) + n128, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n128 } if m.AzureDisk != nil { dAtA[i] = 0x82 @@ -6481,11 +6557,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n128, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n129, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n129 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -6493,11 +6569,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n129, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n130, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n130 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -6505,11 +6581,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n130, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n131, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n131 } if m.ScaleIO != nil { dAtA[i] = 0x9a @@ -6517,11 +6593,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n131, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n132, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n131 + i += n132 } if m.Local != nil { dAtA[i] = 0xa2 @@ -6529,11 +6605,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n132, err := m.Local.MarshalTo(dAtA[i:]) + n133, err := m.Local.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n133 } if m.StorageOS != nil { dAtA[i] = 0xaa @@ -6541,11 +6617,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n133, err := m.StorageOS.MarshalTo(dAtA[i:]) + n134, err := 
m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 + i += n134 } if m.CSI != nil { dAtA[i] = 0xb2 @@ -6553,11 +6629,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) - n134, err := m.CSI.MarshalTo(dAtA[i:]) + n135, err := m.CSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n135 } return i, nil } @@ -6601,21 +6677,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n135, err := (&v).MarshalTo(dAtA[i:]) + n136, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 + i += n136 } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n136, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n137, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 + i += n137 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { dAtA[i] = 0x1a @@ -6635,11 +6711,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n137, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n138, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n138 } dAtA[i] = 0x2a i++ @@ -6674,11 +6750,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) - n138, err := m.NodeAffinity.MarshalTo(dAtA[i:]) + n139, err := m.NodeAffinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n139 } return i, nil } @@ -6757,27 +6833,27 @@ func (m *Pod) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n139, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n140, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n139 + i += n140 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n140, err := m.Spec.MarshalTo(dAtA[i:]) + n141, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n141 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n141, err := m.Status.MarshalTo(dAtA[i:]) + n142, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n142 return i, nil } @@ -6842,11 +6918,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n142, err := m.LabelSelector.MarshalTo(dAtA[i:]) + n143, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n142 + i += n143 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -6992,19 +7068,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n143, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n144, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n144 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n144, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n145, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n145 dAtA[i] 
= 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7191,11 +7267,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n145, err := m.ListMeta.MarshalTo(dAtA[i:]) + n146, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n145 + i += n146 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7255,11 +7331,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n146, err := m.SinceTime.MarshalTo(dAtA[i:]) + n147, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n146 + i += n147 } dAtA[i] = 0x30 i++ @@ -7370,11 +7446,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n147, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n148, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n147 + i += n148 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -7442,11 +7518,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n148, err := m.PodController.MarshalTo(dAtA[i:]) + n149, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n148 + i += n149 } return i, nil } @@ -7570,11 +7646,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n149, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n150, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n150 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -7606,11 +7682,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n150, err := m.Affinity.MarshalTo(dAtA[i:]) + n151, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n150 + i += n151 } dAtA[i] = 0x9a i++ @@ -7691,11 +7767,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) - n151, err := m.DNSConfig.MarshalTo(dAtA[i:]) + n152, err := m.DNSConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n151 + i += n152 } if m.ShareProcessNamespace != nil { dAtA[i] = 0xd8 @@ -7797,11 +7873,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n152, err := m.StartTime.MarshalTo(dAtA[i:]) + n153, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n152 + i += n153 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -7856,19 +7932,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n153, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n154, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n153 + i += n154 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n154, err := m.Status.MarshalTo(dAtA[i:]) + n155, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n155 return i, 
nil } @@ -7890,19 +7966,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n155, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n156, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n156 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n156, err := m.Template.MarshalTo(dAtA[i:]) + n157, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n157 return i, nil } @@ -7924,11 +8000,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n157, err := m.ListMeta.MarshalTo(dAtA[i:]) + n158, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n158 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7962,19 +8038,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n159 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n159, err := m.Spec.MarshalTo(dAtA[i:]) + n160, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n159 + i += n160 return i, nil } @@ -8054,19 +8130,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n160, err := m.PodSignature.MarshalTo(dAtA[i:]) + n161, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n160 + i += n161 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n161, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n162, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n161 + i += n162 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -8099,11 +8175,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n162, err := m.Preference.MarshalTo(dAtA[i:]) + n163, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n163 return i, nil } @@ -8125,11 +8201,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n163, err := m.Handler.MarshalTo(dAtA[i:]) + n164, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n164 dAtA[i] = 0x10 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) @@ -8222,6 +8298,10 @@ func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) + i += copy(dAtA[i:], m.Tenant) return i, nil } @@ -8279,11 +8359,11 @@ func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n164, err := m.SecretRef.MarshalTo(dAtA[i:]) + n165, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n165 } dAtA[i] = 0x40 
i++ @@ -8350,11 +8430,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n165, err := m.SecretRef.MarshalTo(dAtA[i:]) + n166, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n166 } dAtA[i] = 0x40 i++ @@ -8385,11 +8465,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n166 + i += n167 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) @@ -8421,27 +8501,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n168, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n167 + i += n168 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n168, err := m.Spec.MarshalTo(dAtA[i:]) + n169, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n168 + i += n169 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n169, err := m.Status.MarshalTo(dAtA[i:]) + n170, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n170 return i, nil } @@ -8471,11 +8551,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n170, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n171, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n171 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -8505,11 +8585,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n171, err := m.ListMeta.MarshalTo(dAtA[i:]) + n172, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n172 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8571,11 +8651,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n172, err := m.Template.MarshalTo(dAtA[i:]) + n173, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n173 } dAtA[i] = 0x20 i++ @@ -8654,11 +8734,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n173, err := m.Divisor.MarshalTo(dAtA[i:]) + n174, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n174 return i, nil } @@ -8680,27 +8760,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n174, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n175, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n175 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n175, err := m.Spec.MarshalTo(dAtA[i:]) + n176, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err 
} - i += n175 + i += n176 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n176, err := m.Status.MarshalTo(dAtA[i:]) + n177, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n177 return i, nil } @@ -8722,11 +8802,11 @@ func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n177, err := m.ListMeta.MarshalTo(dAtA[i:]) + n178, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n178 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8781,11 +8861,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n178, err := (&v).MarshalTo(dAtA[i:]) + n179, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n179 } } if len(m.Scopes) > 0 { @@ -8807,11 +8887,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) - n179, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + n180, err := m.ScopeSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n180 } return i, nil } @@ -8855,11 +8935,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n180, err := (&v).MarshalTo(dAtA[i:]) + n181, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n181 } } if len(m.Used) > 0 { @@ -8886,11 +8966,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) + n182, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n182 } } return i, nil @@ -8935,11 +9015,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n182, err := (&v).MarshalTo(dAtA[i:]) + n183, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n183 } } if len(m.Requests) > 0 { @@ -8966,11 +9046,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) + n184, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n184 } } return i, nil @@ -9037,11 +9117,11 @@ func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n184, err := m.SecretRef.MarshalTo(dAtA[i:]) + n185, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n185 } dAtA[i] = 0x20 i++ @@ -9109,11 +9189,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n185, err := m.SecretRef.MarshalTo(dAtA[i:]) + n186, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n185 + i += n186 } dAtA[i] = 0x20 i++ @@ -9243,11 +9323,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n186, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n187, err 
:= m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n186 + i += n187 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -9323,11 +9403,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n187, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n187 + i += n188 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -9359,11 +9439,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n189, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n189 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -9399,11 +9479,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n189, err := m.ListMeta.MarshalTo(dAtA[i:]) + n190, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n189 + i += n190 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9437,11 +9517,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n191, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n191 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9561,11 +9641,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n191, err := m.Capabilities.MarshalTo(dAtA[i:]) + n192, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n192 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -9581,11 +9661,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n192, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n193, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n192 + i += n193 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -9654,11 +9734,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n193, err := m.Reference.MarshalTo(dAtA[i:]) + n194, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n194 return i, nil } @@ -9680,27 +9760,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n194, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n195, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n195 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n195, err := m.Spec.MarshalTo(dAtA[i:]) + n196, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n196 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n196, err := m.Status.MarshalTo(dAtA[i:]) + n197, err 
:= m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n196 + i += n197 return i, nil } @@ -9722,11 +9802,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n197, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n198, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n197 + i += n198 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { dAtA[i] = 0x12 @@ -9782,11 +9862,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n198, err := m.ListMeta.MarshalTo(dAtA[i:]) + n199, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n198 + i += n199 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9851,11 +9931,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n199, err := m.ListMeta.MarshalTo(dAtA[i:]) + n200, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n199 + i += n200 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9900,11 +9980,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n200, err := m.TargetPort.MarshalTo(dAtA[i:]) + n201, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n200 + i += n201 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) @@ -10051,11 +10131,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n201, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + n202, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n201 + i += n202 } return i, nil } @@ -10078,11 +10158,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n202, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n203, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n202 + i += n203 return i, nil } @@ -10105,11 +10185,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n203, err := m.ClientIP.MarshalTo(dAtA[i:]) + n204, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n203 + i += n204 } return i, nil } @@ -10153,11 +10233,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n204, err := m.SecretRef.MarshalTo(dAtA[i:]) + n205, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n204 + i += n205 } return i, nil } @@ -10201,11 +10281,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n205, err := m.SecretRef.MarshalTo(dAtA[i:]) + n206, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n205 + i += n206 } return i, nil } @@ -10254,11 +10334,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, 
i, uint64(m.Port.Size())) - n206, err := m.Port.MarshalTo(dAtA[i:]) + n207, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n206 + i += n207 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -10297,11 +10377,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n207, err := m.TimeAdded.MarshalTo(dAtA[i:]) + n208, err := m.TimeAdded.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n207 + i += n208 } return i, nil } @@ -10466,11 +10546,11 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n208, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n209, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n208 + i += n209 return i, nil } @@ -10541,6 +10621,10 @@ func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) i += copy(dAtA[i:], *m.MountPropagation) } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) + i += copy(dAtA[i:], m.SubPathExpr) return i, nil } @@ -10563,11 +10647,11 @@ func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) - n209, err := m.Required.MarshalTo(dAtA[i:]) + n210, err := m.Required.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n209 + i += n210 } return i, nil } @@ -10591,41 +10675,41 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n210, err := m.Secret.MarshalTo(dAtA[i:]) + n211, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n210 + i += n211 } if m.DownwardAPI != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n212, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n211 + i += n212 } if m.ConfigMap != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n212, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n213, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n212 + i += n213 } if m.ServiceAccountToken != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) - n213, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + n214, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n213 + i += n214 } return i, nil } @@ -10649,151 +10733,151 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n214, err := m.HostPath.MarshalTo(dAtA[i:]) + n215, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n214 + i += n215 } if m.EmptyDir != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n215, err := m.EmptyDir.MarshalTo(dAtA[i:]) + n216, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n215 + i += n216 } if m.GCEPersistentDisk != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n216, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + n217, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != 
nil { return 0, err } - i += n216 + i += n217 } if m.AWSElasticBlockStore != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n217, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + n218, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n217 + i += n218 } if m.GitRepo != nil { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n218, err := m.GitRepo.MarshalTo(dAtA[i:]) + n219, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n218 + i += n219 } if m.Secret != nil { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n219, err := m.Secret.MarshalTo(dAtA[i:]) + n220, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n219 + i += n220 } if m.NFS != nil { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n220, err := m.NFS.MarshalTo(dAtA[i:]) + n221, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n220 + i += n221 } if m.ISCSI != nil { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n221, err := m.ISCSI.MarshalTo(dAtA[i:]) + n222, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n221 + i += n222 } if m.Glusterfs != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n222, err := m.Glusterfs.MarshalTo(dAtA[i:]) + n223, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n222 + i += n223 } if m.PersistentVolumeClaim != nil { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n223, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + n224, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n223 + i += n224 } if m.RBD != nil { dAtA[i] = 0x5a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n224, err := m.RBD.MarshalTo(dAtA[i:]) + n225, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n224 + i += n225 } if m.FlexVolume != nil { dAtA[i] = 0x62 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n225, err := m.FlexVolume.MarshalTo(dAtA[i:]) + n226, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n225 + i += n226 } if m.Cinder != nil { dAtA[i] = 0x6a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n226, err := m.Cinder.MarshalTo(dAtA[i:]) + n227, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n226 + i += n227 } if m.CephFS != nil { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n227, err := m.CephFS.MarshalTo(dAtA[i:]) + n228, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n227 + i += n228 } if m.Flocker != nil { dAtA[i] = 0x7a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n228, err := m.Flocker.MarshalTo(dAtA[i:]) + n229, err := m.Flocker.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n228 + i += n229 } if m.DownwardAPI != nil { dAtA[i] = 0x82 @@ -10801,11 +10885,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n229, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n230, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n229 + i += n230 } if m.FC != nil { dAtA[i] = 0x8a @@ -10813,11 +10897,11 @@ func 
(m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n230, err := m.FC.MarshalTo(dAtA[i:]) + n231, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n230 + i += n231 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -10825,11 +10909,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n231, err := m.AzureFile.MarshalTo(dAtA[i:]) + n232, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n231 + i += n232 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -10837,11 +10921,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n232, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n233, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n232 + i += n233 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -10849,11 +10933,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n233, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n234, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n233 + i += n234 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -10861,11 +10945,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n234, err := m.Quobyte.MarshalTo(dAtA[i:]) + n235, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n234 + i += n235 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -10873,11 +10957,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n235, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n236, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n235 + i += n236 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -10885,11 +10969,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n236, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n237, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n236 + i += n237 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -10897,11 +10981,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n237, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n238, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n237 + i += n238 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -10909,11 +10993,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n238, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n239, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n238 + i += n239 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -10921,11 +11005,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n239, err := m.Projected.MarshalTo(dAtA[i:]) + n240, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n239 + i 
+= n240 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -10933,11 +11017,23 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n240, err := m.StorageOS.MarshalTo(dAtA[i:]) + n241, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n240 + i += n241 + } + if m.CSI != nil { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) + n242, err := m.CSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n242 } return i, nil } @@ -10997,11 +11093,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n241, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n243, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n241 + i += n243 return i, nil } @@ -11160,6 +11256,33 @@ func (m *CSIPersistentVolumeSource) Size() (n int) { return n } +func (m *CSIVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *Capabilities) Size() (n int) { var l int _ = l @@ -13547,6 +13670,8 @@ func (m *QuobyteVolumeSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Group) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tenant) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14390,6 +14515,8 @@ func (m *VolumeMount) Size() (n int) { l = len(*m.MountPropagation) n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.SubPathExpr) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -14536,6 +14663,10 @@ func (m *VolumeSource) Size() (n int) { l = m.StorageOS.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -14699,6 +14830,30 @@ func (this *CSIPersistentVolumeSource) String() string { }, "") return s } +func (this *CSIVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} func (this 
*Capabilities) String() string { if this == nil { return "nil" @@ -16548,6 +16703,7 @@ func (this *QuobyteVolumeSource) String() string { `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, `}`, }, "") return s @@ -17273,6 +17429,7 @@ func (this *VolumeMount) String() string { `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, `}`, }, "") return s @@ -17332,6 +17489,7 @@ func (this *VolumeSource) String() string { `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, `}`, }, "") return s @@ -18842,6 +19000,287 @@ func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &LocalObjectReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -41550,6 +41989,35 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50046,6 +50514,35 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { s := MountPropagationMode(dAtA[iNdEx:postIndex]) m.MountPropagation = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPathExpr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51252,6 +51749,39 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51648,808 +52178,814 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12835 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x64, 0x57, - 0x56, 0xd8, 0xbe, 0xee, 0xd6, 0x47, 0x1f, 0x7d, 0xdf, 0x99, 0xb1, 0x35, 0xb2, 0x67, 0x7a, 0xfc, - 0xbc, 0x3b, 0x1e, 0xaf, 0x6d, 0xcd, 0x7a, 0x6c, 0xaf, 0xcd, 0xda, 0x6b, 0x90, 0xd4, 0xd2, 0x4c, - 0x7b, 0x46, 0x9a, 0xf6, 0x6d, 0xcd, 0x78, 0xd7, 0x78, 0x97, 0x7d, 0xea, 0xbe, 0x92, 0x9e, 0xf5, - 0xf4, 0x5e, 0xfb, 0xbd, 0xd7, 0x9a, 0x91, 0x03, 0x55, 0xc9, 0x12, 
0x48, 0x36, 0x50, 0xa9, 0xad, - 0xb0, 0x95, 0x0f, 0xa0, 0x48, 0x15, 0x21, 0x05, 0x84, 0x24, 0x15, 0x02, 0x01, 0xc2, 0x42, 0x42, - 0x20, 0x3f, 0xc8, 0x9f, 0x0d, 0x49, 0x55, 0x6a, 0xa9, 0xa2, 0xa2, 0x80, 0x48, 0x25, 0xc5, 0x8f, - 0x40, 0x2a, 0xe4, 0x47, 0x50, 0xa8, 0x90, 0xba, 0x9f, 0xef, 0xde, 0xd7, 0xef, 0x75, 0xb7, 0xc6, - 0x1a, 0xd9, 0x50, 0xfb, 0xaf, 0xfb, 0x9e, 0x73, 0xcf, 0xbd, 0xef, 0x7e, 0x9e, 0x73, 0xee, 0xf9, - 0x80, 0x57, 0x77, 0x5e, 0x89, 0xe6, 0xdd, 0xe0, 0xea, 0x4e, 0x67, 0x83, 0x84, 0x3e, 0x89, 0x49, - 0x74, 0x75, 0x8f, 0xf8, 0xad, 0x20, 0xbc, 0x2a, 0x00, 0x4e, 0xdb, 0xbd, 0xda, 0x0c, 0x42, 0x72, - 0x75, 0xef, 0xf9, 0xab, 0x5b, 0xc4, 0x27, 0xa1, 0x13, 0x93, 0xd6, 0x7c, 0x3b, 0x0c, 0xe2, 0x00, - 0x21, 0x8e, 0x33, 0xef, 0xb4, 0xdd, 0x79, 0x8a, 0x33, 0xbf, 0xf7, 0xfc, 0xdc, 0x73, 0x5b, 0x6e, - 0xbc, 0xdd, 0xd9, 0x98, 0x6f, 0x06, 0xbb, 0x57, 0xb7, 0x82, 0xad, 0xe0, 0x2a, 0x43, 0xdd, 0xe8, - 0x6c, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbd, 0x98, 0x34, 0xb3, 0xeb, 0x34, 0xb7, - 0x5d, 0x9f, 0x84, 0xfb, 0x57, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x90, 0x44, 0x41, 0x27, 0x6c, 0x92, - 0x74, 0xc3, 0x3d, 0x6b, 0x45, 0x57, 0x77, 0x49, 0xec, 0x64, 0x74, 0x77, 0xee, 0x6a, 0x5e, 0xad, - 0xb0, 0xe3, 0xc7, 0xee, 0x6e, 0x77, 0x33, 0x9f, 0xee, 0x57, 0x21, 0x6a, 0x6e, 0x93, 0x5d, 0xa7, - 0xab, 0xde, 0x0b, 0x79, 0xf5, 0x3a, 0xb1, 0xeb, 0x5d, 0x75, 0xfd, 0x38, 0x8a, 0xc3, 0x74, 0x25, - 0xfb, 0x9b, 0x16, 0x5c, 0x5a, 0x78, 0xab, 0xb1, 0xec, 0x39, 0x51, 0xec, 0x36, 0x17, 0xbd, 0xa0, - 0xb9, 0xd3, 0x88, 0x83, 0x90, 0xdc, 0x0d, 0xbc, 0xce, 0x2e, 0x69, 0xb0, 0x81, 0x40, 0xcf, 0xc2, - 0xe8, 0x1e, 0xfb, 0x5f, 0xab, 0xce, 0x5a, 0x97, 0xac, 0x2b, 0xe5, 0xc5, 0xe9, 0xdf, 0x3c, 0xa8, - 0x7c, 0xec, 0xf0, 0xa0, 0x32, 0x7a, 0x57, 0x94, 0x63, 0x85, 0x81, 0x2e, 0xc3, 0xf0, 0x66, 0xb4, - 0xbe, 0xdf, 0x26, 0xb3, 0x05, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x57, 0x1a, 0xb4, 0x14, 0x0b, 0x28, - 0xba, 0x0a, 0xe5, 0xb6, 0x13, 0xc6, 0x6e, 0xec, 0x06, 0xfe, 0x6c, 0xf1, 0x92, 0x75, 0x65, 0x68, - 0x71, 0x46, 0xa0, 0x96, 0xeb, 0x12, 0x80, 0x13, 0x1c, 0xda, 0x8d, 0x90, 0x38, 0xad, 0xdb, 0xbe, - 0xb7, 0x3f, 0x5b, 0xba, 0x64, 0x5d, 0x19, 0x4d, 0xba, 0x81, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x0f, - 0x17, 0x60, 0x74, 0x61, 0x73, 0xd3, 0xf5, 0xdd, 0x78, 0x1f, 0xdd, 0x85, 0x71, 0x3f, 0x68, 0x11, - 0xf9, 0x9f, 0x7d, 0xc5, 0xd8, 0xb5, 0x4b, 0xf3, 0xdd, 0x4b, 0x69, 0x7e, 0x4d, 0xc3, 0x5b, 0x9c, - 0x3e, 0x3c, 0xa8, 0x8c, 0xeb, 0x25, 0xd8, 0xa0, 0x83, 0x30, 0x8c, 0xb5, 0x83, 0x96, 0x22, 0x5b, - 0x60, 0x64, 0x2b, 0x59, 0x64, 0xeb, 0x09, 0xda, 0xe2, 0xd4, 0xe1, 0x41, 0x65, 0x4c, 0x2b, 0xc0, - 0x3a, 0x11, 0xb4, 0x01, 0x53, 0xf4, 0xaf, 0x1f, 0xbb, 0x8a, 0x6e, 0x91, 0xd1, 0x7d, 0x32, 0x8f, - 0xae, 0x86, 0xba, 0x78, 0xe6, 0xf0, 0xa0, 0x32, 0x95, 0x2a, 0xc4, 0x69, 0x82, 0xf6, 0xfb, 0x30, - 0xb9, 0x10, 0xc7, 0x4e, 0x73, 0x9b, 0xb4, 0xf8, 0x0c, 0xa2, 0x17, 0xa1, 0xe4, 0x3b, 0xbb, 0x44, - 0xcc, 0xef, 0x25, 0x31, 0xb0, 0xa5, 0x35, 0x67, 0x97, 0x1c, 0x1d, 0x54, 0xa6, 0xef, 0xf8, 0xee, - 0x7b, 0x1d, 0xb1, 0x2a, 0x68, 0x19, 0x66, 0xd8, 0xe8, 0x1a, 0x40, 0x8b, 0xec, 0xb9, 0x4d, 0x52, - 0x77, 0xe2, 0x6d, 0x31, 0xdf, 0x48, 0xd4, 0x85, 0xaa, 0x82, 0x60, 0x0d, 0xcb, 0xbe, 0x0f, 0xe5, - 0x85, 0xbd, 0xc0, 0x6d, 0xd5, 0x83, 0x56, 0x84, 0x76, 0x60, 0xaa, 0x1d, 0x92, 0x4d, 0x12, 0xaa, - 0xa2, 0x59, 0xeb, 0x52, 0xf1, 0xca, 0xd8, 0xb5, 0x2b, 0x99, 0x1f, 0x6b, 0xa2, 0x2e, 0xfb, 0x71, - 0xb8, 0xbf, 0xf8, 0xa8, 0x68, 0x6f, 0x2a, 0x05, 0xc5, 0x69, 0xca, 0xf6, 0xbf, 0x2d, 0xc0, 0xb9, - 0x85, 0xf7, 0x3b, 0x21, 0xa9, 0xba, 0xd1, 0x4e, 0x7a, 0x85, 0xb7, 0xdc, 0x68, 0x67, 0x2d, 
0x19, - 0x01, 0xb5, 0xb4, 0xaa, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x1c, 0x8c, 0xd0, 0xdf, 0x77, 0x70, 0x4d, - 0x7c, 0xf2, 0x19, 0x81, 0x3c, 0x56, 0x75, 0x62, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, 0x55, 0x18, - 0x6b, 0xb2, 0x0d, 0xb9, 0xb5, 0x1a, 0xb4, 0x08, 0x9b, 0xcc, 0xf2, 0xe2, 0x33, 0x14, 0x7d, 0x29, - 0x29, 0x3e, 0x3a, 0xa8, 0xcc, 0xf2, 0xbe, 0x09, 0x12, 0x1a, 0x0c, 0xeb, 0xf5, 0x91, 0xad, 0xf6, - 0x57, 0x89, 0x51, 0x82, 0x8c, 0xbd, 0x75, 0x45, 0xdb, 0x2a, 0x43, 0x6c, 0xab, 0x8c, 0x67, 0x6f, - 0x13, 0xf4, 0x3c, 0x94, 0x76, 0x5c, 0xbf, 0x35, 0x3b, 0xcc, 0x68, 0x5d, 0xa0, 0x73, 0x7e, 0xd3, - 0xf5, 0x5b, 0x47, 0x07, 0x95, 0x19, 0xa3, 0x3b, 0xb4, 0x10, 0x33, 0x54, 0xfb, 0x8f, 0x2d, 0xa8, - 0x30, 0xd8, 0x8a, 0xeb, 0x91, 0x3a, 0x09, 0x23, 0x37, 0x8a, 0x89, 0x1f, 0x1b, 0x03, 0x7a, 0x0d, - 0x20, 0x22, 0xcd, 0x90, 0xc4, 0xda, 0x90, 0xaa, 0x85, 0xd1, 0x50, 0x10, 0xac, 0x61, 0xd1, 0x03, - 0x21, 0xda, 0x76, 0x42, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x21, 0x01, 0x38, 0xc1, 0x31, - 0x0e, 0x84, 0x62, 0xbf, 0x03, 0x01, 0x7d, 0x16, 0xa6, 0x92, 0xc6, 0xa2, 0xb6, 0xd3, 0x94, 0x03, - 0xc8, 0xb6, 0x4c, 0xc3, 0x04, 0xe1, 0x34, 0xae, 0xfd, 0x8f, 0x2c, 0xb1, 0x78, 0xe8, 0x57, 0x7f, - 0xc4, 0xbf, 0xd5, 0xfe, 0x25, 0x0b, 0x46, 0x16, 0x5d, 0xbf, 0xe5, 0xfa, 0x5b, 0xe8, 0x4b, 0x30, - 0x4a, 0xef, 0xa6, 0x96, 0x13, 0x3b, 0xe2, 0xdc, 0xfb, 0x94, 0xb6, 0xb7, 0xd4, 0x55, 0x31, 0xdf, - 0xde, 0xd9, 0xa2, 0x05, 0xd1, 0x3c, 0xc5, 0xa6, 0xbb, 0xed, 0xf6, 0xc6, 0xbb, 0xa4, 0x19, 0xaf, - 0x92, 0xd8, 0x49, 0x3e, 0x27, 0x29, 0xc3, 0x8a, 0x2a, 0xba, 0x09, 0xc3, 0xb1, 0x13, 0x6e, 0x91, - 0x58, 0x1c, 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x7e, 0x93, 0x24, 0xd7, 0xc2, - 0x3a, 0xab, 0x8a, 0x05, 0x09, 0xfb, 0x6f, 0x0c, 0xc3, 0xf9, 0xa5, 0x46, 0x2d, 0x67, 0x5d, 0x5d, - 0x86, 0xe1, 0x56, 0xe8, 0xee, 0x91, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x65, 0xa5, 0x58, 0x40, 0xd1, - 0x2b, 0x30, 0xce, 0x2f, 0xa4, 0x1b, 0x8e, 0xdf, 0xf2, 0xe4, 0x10, 0x9f, 0x15, 0xd8, 0xe3, 0x77, - 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb9, 0xa8, 0x2e, 0xa7, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x15, 0x0b, - 0xa6, 0x79, 0x33, 0x0b, 0x71, 0x1c, 0xba, 0x1b, 0x9d, 0x98, 0x44, 0xb3, 0x43, 0xec, 0xa4, 0x5b, - 0xca, 0x1a, 0xad, 0xdc, 0x11, 0x98, 0xbf, 0x9b, 0xa2, 0xc2, 0x0f, 0xc1, 0x59, 0xd1, 0xee, 0x74, - 0x1a, 0x8c, 0xbb, 0x9a, 0x45, 0xdf, 0x6b, 0xc1, 0x5c, 0x33, 0xf0, 0xe3, 0x30, 0xf0, 0x3c, 0x12, - 0xd6, 0x3b, 0x1b, 0x9e, 0x1b, 0x6d, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x72, 0xe6, 0x50, - 0x21, 0x89, 0x39, 0xbc, 0x78, 0x78, 0x50, 0x99, 0x5b, 0xca, 0x25, 0x85, 0x7b, 0x34, 0x83, 0x76, - 0x00, 0xd1, 0xab, 0xb4, 0x11, 0x3b, 0x5b, 0x24, 0x69, 0x7c, 0x64, 0xf0, 0xc6, 0x1f, 0x39, 0x3c, - 0xa8, 0xa0, 0xb5, 0x2e, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x0f, 0xce, 0xd2, 0xd2, 0xae, 0x6f, 0x1d, - 0x1d, 0xbc, 0xb9, 0xd9, 0xc3, 0x83, 0xca, 0xd9, 0xb5, 0x0c, 0x22, 0x38, 0x93, 0xf4, 0xdc, 0x12, - 0x9c, 0xcb, 0x9c, 0x2a, 0x34, 0x0d, 0xc5, 0x1d, 0xc2, 0x59, 0x90, 0x32, 0xa6, 0x3f, 0xd1, 0x59, - 0x18, 0xda, 0x73, 0xbc, 0x8e, 0x58, 0xa5, 0x98, 0xff, 0xf9, 0x4c, 0xe1, 0x15, 0xcb, 0x6e, 0xc2, - 0xf8, 0x92, 0xd3, 0x76, 0x36, 0x5c, 0xcf, 0x8d, 0x5d, 0x12, 0xa1, 0xa7, 0xa0, 0xe8, 0xb4, 0x5a, - 0xec, 0x8a, 0x2c, 0x2f, 0x9e, 0x3b, 0x3c, 0xa8, 0x14, 0x17, 0x5a, 0xf4, 0xac, 0x06, 0x85, 0xb5, - 0x8f, 0x29, 0x06, 0xfa, 0x24, 0x94, 0x5a, 0x61, 0xd0, 0x9e, 0x2d, 0x30, 0x4c, 0x3a, 0x54, 0xa5, - 0x6a, 0x18, 0xb4, 0x53, 0xa8, 0x0c, 0xc7, 0xfe, 0xb5, 0x02, 0x3c, 0xbe, 0x44, 0xda, 0xdb, 0x2b, - 0x8d, 0x9c, 0x4d, 0x77, 0x05, 0x46, 0x77, 0x03, 0xdf, 0x8d, 0x83, 0x30, 0x12, 0x4d, 0xb3, 0xdb, - 0x64, 0x55, 0x94, 
-	[remaining hunk: byte-array contents (compressed protobuf fileDescriptor) of a removed vendored generated Go file, omitted]
+ // 12934 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x64, 0x57, + 0x56, 0x18, 0xbe, 0xaf, 0xbb, 0x25, 0x75, 0x1f, 0x7d, 0xdf, 0x99, 0xb1, 0x35, 0xf2, 0xcc, 0xf4, + 0xf8, 0x79, 0x77, 0x3c, 0x5e, 0xdb, 0xd2, 0x7a, 0x6c, 0xaf, 0xcd, 0xda, 0x6b, 0x90, 0xd4, 0xd2, + 0x4c, 0x7b, 0x46, 0x9a, 0xf6, 0x6d, 0xcd, 0x78, 0xd7, 0x78, 0x17, 0x3f, 0x75, 0x5f, 0x49, 0xcf, + 0x6a, 0xbd, 0xd7, 0x7e, 0xef, 0xb5, 0x66, 0xe4, 0x1f, 0xd4, 0x2f, 0x59, 0x02, 0x61, 0x03, 0x95, + 0xda, 0x4a, 0xb6, 0xf2, 0x01, 0x14, 0xa9, 0x22, 0xa4, 0x80, 0x90, 0xa4, 0x42, 0x20, 0x40, 0x58, + 0x48, 0x08, 0x24, 0x55, 0x24, 0x7f, 0x6c, 0x48, 0xaa, 0x52, 0x4b, 0x15, 0x15, 0x05, 0x44, 0x2a, + 0x29, 0xfe, 0x08, 0xa4, 0x42, 0xfe, 0x08, 0x0a, 0x15, 0x52, 0xf7, 0xf3, 0xdd, 0xfb, 0xfa, 0xbd, + 0xee, 0xd6, 0x58, 0x23, 0x9b, 0xad, 0xfd, 0xaf, 0xfb, 0x9e, 0x73, 0xcf, 0xbd, 0xef, 0x7e, 0x9e, + 0x73, 0xee, 0xf9, 0x80, 0x57, 0x76, 0x5e, 0x0e, 0xe7, 0x5c, 0x7f, 0x7e, 0xa7, 0xb3, 0x41, 0x02, + 0x8f, 0x44, 0x24, 0x9c, 0xdf, 0x23, 0x5e, 0xd3, 0x0f, 0xe6, 0x05, 0xc0, 0x69, 0xbb, 0xf3, 0x0d, + 0x3f, 0x20, 0xf3, 0x7b, 0xcf, 0xcd, 0x6f, 0x11, 0x8f, 0x04, 0x4e, 0x44, 0x9a, 0x73, 0xed, 0xc0, + 0x8f, 0x7c, 0x84, 0x38, 0xce, 0x9c, 0xd3, 0x76, 0xe7, 0x28, 0xce, 0xdc, 0xde, 0x73, 0xb3, 0xcf, + 0x6e, 0xb9, 0xd1, 0x76, 0x67, 0x63, 0xae, 0xe1, 0xef, 0xce, 0x6f, 0xf9, 0x5b, 0xfe, 0x3c, 0x43, + 0xdd, 0xe8, 0x6c, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbe, 0x10, 0x37, 0xb3, 0xeb, + 0x34, 0xb6, 0x5d, 0x8f, 0x04, 0xfb, 0xf3, 0xed, 0x9d, 0x2d, 0xd6, 0x6e, 0x40, 0x42, 0xbf, 0x13, + 0x34, 0x48, 0xb2, 0xe1, 0x9e, 0xb5, 0xc2, 0xf9, 0x5d, 0x12, 0x39, 0x29, 0xdd, 0x9d, 0x9d, 0xcf, + 0xaa, 0x15, 0x74, 0xbc, 0xc8, 0xdd, 0xed, 0x6e, 0xe6, 0xd3, 0xfd, 0x2a, 0x84, 0x8d, 0x6d, 0xb2, + 0xeb, 0x74, 0xd5, 0x7b, 0x3e, 0xab, 0x5e, 0x27, 0x72, 0x5b, 0xf3, 0xae, 0x17, 0x85, 0x51, 0x90, + 0xac, 0x64, 0x7f, 0xc3, 0x82, 0xcb, 0x0b, 0x6f, 0xd6, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, + 0x96, 0xdf, 0xd8, 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0xce, 0x2e, 0xa9, 0xb3, 0x81, 0x40, + 0xcf, 0x40, 0x71, 0x8f, 0xfd, 0xaf, 0x56, 0x66, 0xac, 0xcb, 0xd6, 0xd5, 0xd2, 0xe2, 0xd4, 0x6f, + 0x1e, 0x94, 0x3f, 0x76, 0x78, 0x50, 0x2e, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x2b, 0x30, 0xbc, + 0x19, 0xae, 0xef, 0xb7, 0xc9, 0x4c, 0x8e, 0xe1, 0x4e, 0x08, 0xdc, 0xe1, 0x95, 0x3a, 0x2d, 0xc5, + 0x02, 0x8a, 0xe6, 0xa1, 0xd4, 0x76, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0xc9, 0x5f, 0xb6, 0xae, + 0x0e, 0x2d, 0x4e, 0x0b, 0xd4, 0x52, 0x4d, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x79, + 0xdb, 0x6b, 0xed, 0xcf, 0x14, 0x2e, 0x5b, 0x57, 0x8b, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, + 0xfe, 0xe1, 0x1c, 0x14, 0x17, 0x36, 0x37, 0x5d, 0xcf, 0x8d, 0xf6, 0xd1, 0x5d, 0x18, 0xf3, 0xfc, + 0x26, 0x91, 0xff, 0xd9, 0x57, 0x8c, 0x5e, 0xbb, 0x3c, 0xd7, 0xbd, 0x94, 0xe6, 0xd6, 0x34, 0xbc, + 0xc5, 0xa9, 0xc3, 0x83, 0xf2, 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x2a, + 0xb2, 0x39, 0x46, 0xb6, 0x9c, 0x46, 0xb6, 0x16, 0xa3, 0x2d, 0x4e, 0x1e, 0x1e, 0x94, 0x47, 0xb5, + 0x02, 0xac, 0x13, 0x41, 0x1b, 0x30, 0x49, 0xff, 0x7a, 0x91, 0xab, 0xe8, 0xe6, 0x19, 0xdd, 0x27, + 0xb2, 0xe8, 0x6a, 0xa8, 0x8b, 0x67, 0x0e, 0x0f, 0xca, 0x93, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xbf, + 0x0f, 0x13, 0x0b, 0x51, 0xe4, 0x34, 0xb6, 0x49, 0x93, 0xcf, 0x20, 0x7a, 0x01, 0x0a, 0x9e, 0xb3, + 0x4b, 0xc4, 0xfc, 0x5e, 0x16, 0x03, 0x5b, 0x58, 0x73, 0x76, 0xc9, 0xd1, 0x41, 0x79, 0xea, 0x8e, + 0xe7, 0xbe, 0xd7, 0x11, 0xab, 0x82, 0x96, 0x61, 0x86, 0x8d, 0xae, 0x01, 
0x34, 0xc9, 0x9e, 0xdb, + 0x20, 0x35, 0x27, 0xda, 0x16, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x28, 0x08, 0xd6, 0xb0, 0xec, 0xfb, + 0x50, 0x5a, 0xd8, 0xf3, 0xdd, 0x66, 0xcd, 0x6f, 0x86, 0x68, 0x07, 0x26, 0xdb, 0x01, 0xd9, 0x24, + 0x81, 0x2a, 0x9a, 0xb1, 0x2e, 0xe7, 0xaf, 0x8e, 0x5e, 0xbb, 0x9a, 0xfa, 0xb1, 0x26, 0xea, 0xb2, + 0x17, 0x05, 0xfb, 0x8b, 0x8f, 0x8a, 0xf6, 0x26, 0x13, 0x50, 0x9c, 0xa4, 0x6c, 0xff, 0xab, 0x1c, + 0x9c, 0x5b, 0x78, 0xbf, 0x13, 0x90, 0x8a, 0x1b, 0xee, 0x24, 0x57, 0x78, 0xd3, 0x0d, 0x77, 0xd6, + 0xe2, 0x11, 0x50, 0x4b, 0xab, 0x22, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc2, 0x08, 0xfd, 0x7d, 0x07, + 0x57, 0xc5, 0x27, 0x9f, 0x11, 0xc8, 0xa3, 0x15, 0x27, 0x72, 0x2a, 0x1c, 0x84, 0x25, 0x0e, 0x5a, + 0x85, 0xd1, 0x06, 0xdb, 0x90, 0x5b, 0xab, 0x7e, 0x93, 0xb0, 0xc9, 0x2c, 0x2d, 0x3e, 0x4d, 0xd1, + 0x97, 0xe2, 0xe2, 0xa3, 0x83, 0xf2, 0x0c, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, + 0x6a, 0x7f, 0x15, 0x18, 0x25, 0x48, 0xd9, 0x5b, 0x57, 0xb5, 0xad, 0x32, 0xc4, 0xb6, 0xca, 0x58, + 0xfa, 0x36, 0x41, 0xcf, 0x41, 0x61, 0xc7, 0xf5, 0x9a, 0x33, 0xc3, 0x8c, 0xd6, 0x45, 0x3a, 0xe7, + 0x37, 0x5d, 0xaf, 0x79, 0x74, 0x50, 0x9e, 0x36, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0xd8, + 0x82, 0x32, 0x83, 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, + 0xaf, 0x01, 0x84, 0xa4, 0x11, 0x90, 0x48, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, + 0x7a, 0x20, 0x84, 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0xc7, + 0x38, 0xc6, 0x81, 0x90, 0xef, 0x77, 0x20, 0xa0, 0xcf, 0xc2, 0x64, 0xdc, 0x58, 0xd8, 0x76, 0x1a, + 0x72, 0x00, 0xd9, 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc4, 0xb5, 0xff, 0xbe, 0x25, 0x16, 0x0f, 0xfd, + 0xea, 0x8f, 0xf8, 0xb7, 0xda, 0xbf, 0x64, 0xc1, 0xc8, 0xa2, 0xeb, 0x35, 0x5d, 0x6f, 0x0b, 0xbd, + 0x03, 0x45, 0x7a, 0x37, 0x35, 0x9d, 0xc8, 0x11, 0xe7, 0xde, 0xa7, 0xb4, 0xbd, 0xa5, 0xae, 0x8a, + 0xb9, 0xf6, 0xce, 0x16, 0x2d, 0x08, 0xe7, 0x28, 0x36, 0xdd, 0x6d, 0xb7, 0x37, 0xde, 0x25, 0x8d, + 0x68, 0x95, 0x44, 0x4e, 0xfc, 0x39, 0x71, 0x19, 0x56, 0x54, 0xd1, 0x4d, 0x18, 0x8e, 0x9c, 0x60, + 0x8b, 0x44, 0xe2, 0x00, 0x4c, 0x3d, 0xa8, 0x78, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x1a, 0x24, 0xbe, + 0x16, 0xd6, 0x59, 0x55, 0x2c, 0x48, 0xd8, 0x7f, 0x65, 0x18, 0xce, 0x2f, 0xd5, 0xab, 0x19, 0xeb, + 0xea, 0x0a, 0x0c, 0x37, 0x03, 0x77, 0x8f, 0x04, 0x62, 0x9c, 0x15, 0x95, 0x0a, 0x2b, 0xc5, 0x02, + 0x8a, 0x5e, 0x86, 0x31, 0x7e, 0x21, 0xdd, 0x70, 0xbc, 0x66, 0x4b, 0x0e, 0xf1, 0x59, 0x81, 0x3d, + 0x76, 0x57, 0x83, 0x61, 0x03, 0xf3, 0x98, 0x8b, 0xea, 0x4a, 0x62, 0x33, 0x66, 0x5d, 0x76, 0x5f, + 0xb6, 0x60, 0x8a, 0x37, 0xb3, 0x10, 0x45, 0x81, 0xbb, 0xd1, 0x89, 0x48, 0x38, 0x33, 0xc4, 0x4e, + 0xba, 0xa5, 0xb4, 0xd1, 0xca, 0x1c, 0x81, 0xb9, 0xbb, 0x09, 0x2a, 0xfc, 0x10, 0x9c, 0x11, 0xed, + 0x4e, 0x25, 0xc1, 0xb8, 0xab, 0x59, 0xf4, 0xbd, 0x16, 0xcc, 0x36, 0x7c, 0x2f, 0x0a, 0xfc, 0x56, + 0x8b, 0x04, 0xb5, 0xce, 0x46, 0xcb, 0x0d, 0xb7, 0xf9, 0x3a, 0xc5, 0x64, 0x93, 0x9d, 0x04, 0x19, + 0x73, 0xa8, 0x90, 0xc4, 0x1c, 0x5e, 0x3a, 0x3c, 0x28, 0xcf, 0x2e, 0x65, 0x92, 0xc2, 0x3d, 0x9a, + 0x41, 0x3b, 0x80, 0xe8, 0x55, 0x5a, 0x8f, 0x9c, 0x2d, 0x12, 0x37, 0x3e, 0x32, 0x78, 0xe3, 0x8f, + 0x1c, 0x1e, 0x94, 0xd1, 0x5a, 0x17, 0x09, 0x9c, 0x42, 0x16, 0xbd, 0x07, 0x67, 0x69, 0x69, 0xd7, + 0xb7, 0x16, 0x07, 0x6f, 0x6e, 0xe6, 0xf0, 0xa0, 0x7c, 0x76, 0x2d, 0x85, 0x08, 0x4e, 0x25, 0x3d, + 0xbb, 0x04, 0xe7, 0x52, 0xa7, 0x0a, 0x4d, 0x41, 0x7e, 0x87, 0x70, 0x16, 0xa4, 0x84, 0xe9, 0x4f, + 0x74, 0x16, 0x86, 0xf6, 0x9c, 0x56, 0x47, 0xac, 0x52, 0xcc, 0xff, 0x7c, 0x26, 0xf7, 0xb2, 0x65, + 
0xff, 0xeb, 0x3c, 0x4c, 0x2e, 0xd5, 0xab, 0x0f, 0xb4, 0x05, 0xf4, 0x3b, 0x20, 0xd7, 0xf3, 0x0e, + 0x88, 0x6f, 0x94, 0x7c, 0xe6, 0x8d, 0xf2, 0xff, 0xa7, 0xac, 0xdf, 0x02, 0x5b, 0xbf, 0xdf, 0x96, + 0xb1, 0x7e, 0x4f, 0x78, 0xd5, 0xee, 0x65, 0x4c, 0xe1, 0x10, 0x9b, 0xc2, 0x54, 0x76, 0xe1, 0x96, + 0xdf, 0x70, 0x5a, 0xc9, 0x73, 0xe7, 0x43, 0x99, 0xc7, 0x06, 0x8c, 0x2d, 0x39, 0x6d, 0x67, 0xc3, + 0x6d, 0xb9, 0x91, 0x4b, 0x42, 0xf4, 0x24, 0xe4, 0x9d, 0x66, 0x93, 0xb1, 0x3a, 0xa5, 0xc5, 0x73, + 0x87, 0x07, 0xe5, 0xfc, 0x42, 0x93, 0xde, 0xb9, 0xa0, 0xb0, 0xf6, 0x31, 0xc5, 0x40, 0x9f, 0x84, + 0x42, 0x33, 0xf0, 0xdb, 0x33, 0x39, 0x86, 0x49, 0x97, 0x7c, 0xa1, 0x12, 0xf8, 0xed, 0x04, 0x2a, + 0xc3, 0xb1, 0x7f, 0x2d, 0x07, 0x17, 0x96, 0x48, 0x7b, 0x7b, 0xa5, 0x9e, 0x71, 0x78, 0x5e, 0x85, + 0xe2, 0xae, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x69, 0xb6, 0x22, 0x56, 0x45, 0x19, 0x56, 0x50, + 0x74, 0x19, 0x0a, 0xed, 0x98, 0xa3, 0x1b, 0x93, 0xdc, 0x20, 0xe3, 0xe5, 0x18, 0x84, 0x62, 0x74, + 0x42, 0x12, 0x88, 0x15, 0xa3, 0x30, 0xee, 0x84, 0x24, 0xc0, 0x0c, 0x12, 0x5f, 0x8b, 0xf4, 0xc2, + 0x14, 0xc7, 0x63, 0xe2, 0x5a, 0xa4, 0x10, 0xac, 0x61, 0xa1, 0x1a, 0x94, 0xc2, 0xc4, 0xcc, 0x0e, + 0xb4, 0x39, 0xc7, 0xd9, 0xbd, 0xa9, 0x66, 0x32, 0x26, 0x62, 0x1c, 0xe7, 0xc3, 0x7d, 0xef, 0xcd, + 0xaf, 0xe5, 0x00, 0xf1, 0x21, 0xfc, 0x73, 0x36, 0x70, 0x77, 0xba, 0x07, 0x6e, 0xf0, 0x2d, 0x71, + 0x52, 0xa3, 0xf7, 0xbf, 0x2c, 0xb8, 0xb0, 0xe4, 0x7a, 0x4d, 0x12, 0x64, 0x2c, 0xc0, 0x87, 0x23, + 0x48, 0x1e, 0xef, 0xc6, 0x36, 0x96, 0x58, 0xe1, 0x04, 0x96, 0x98, 0xfd, 0x47, 0x16, 0x20, 0xfe, + 0xd9, 0x1f, 0xb9, 0x8f, 0xbd, 0xd3, 0xfd, 0xb1, 0x27, 0xb0, 0x2c, 0xec, 0x5b, 0x30, 0xb1, 0xd4, + 0x72, 0x89, 0x17, 0x55, 0x6b, 0x4b, 0xbe, 0xb7, 0xe9, 0x6e, 0xa1, 0xcf, 0xc0, 0x44, 0xe4, 0xee, + 0x12, 0xbf, 0x13, 0xd5, 0x49, 0xc3, 0xf7, 0x98, 0x18, 0x47, 0x25, 0x7a, 0x74, 0x78, 0x50, 0x9e, + 0x58, 0x37, 0x20, 0x38, 0x81, 0x69, 0xff, 0x0e, 0x1d, 0x3f, 0x7f, 0xb7, 0xed, 0x7b, 0xc4, 0x8b, + 0x96, 0x7c, 0xaf, 0xc9, 0xc5, 0xfd, 0xcf, 0x40, 0x21, 0xa2, 0xe3, 0xc1, 0xc7, 0xee, 0x8a, 0xdc, + 0x28, 0x74, 0x14, 0x8e, 0x0e, 0xca, 0x8f, 0x74, 0xd7, 0x60, 0xe3, 0xc4, 0xea, 0xa0, 0x6f, 0x83, + 0xe1, 0x30, 0x72, 0xa2, 0x4e, 0x28, 0x46, 0xf3, 0x71, 0x39, 0x9a, 0x75, 0x56, 0x7a, 0x74, 0x50, + 0x9e, 0x54, 0xd5, 0x78, 0x11, 0x16, 0x15, 0xd0, 0x53, 0x30, 0xb2, 0x4b, 0xc2, 0xd0, 0xd9, 0x92, + 0xb7, 0xe1, 0xa4, 0xa8, 0x3b, 0xb2, 0xca, 0x8b, 0xb1, 0x84, 0xa3, 0x27, 0x60, 0x88, 0x04, 0x81, + 0x1f, 0x88, 0x3d, 0x3a, 0x2e, 0x10, 0x87, 0x96, 0x69, 0x21, 0xe6, 0x30, 0xfb, 0xdf, 0x59, 0x30, + 0xa9, 0xfa, 0xca, 0xdb, 0x3a, 0x05, 0x96, 0xfc, 0x2d, 0x80, 0x86, 0xfc, 0xc0, 0x90, 0xdd, 0x1e, + 0xa3, 0xd7, 0xae, 0xa4, 0x5e, 0xd4, 0x5d, 0xc3, 0x18, 0x53, 0x56, 0x45, 0x21, 0xd6, 0xa8, 0xd9, + 0xbf, 0x6a, 0xc1, 0x99, 0xc4, 0x17, 0xdd, 0x72, 0xc3, 0x08, 0xbd, 0xdd, 0xf5, 0x55, 0x73, 0x83, + 0x7d, 0x15, 0xad, 0xcd, 0xbe, 0x49, 0x2d, 0x65, 0x59, 0xa2, 0x7d, 0xd1, 0x0d, 0x18, 0x72, 0x23, + 0xb2, 0x2b, 0x3f, 0xe6, 0x89, 0x9e, 0x1f, 0xc3, 0x7b, 0x15, 0xcf, 0x48, 0x95, 0xd6, 0xc4, 0x9c, + 0x80, 0xfd, 0xd7, 0xf3, 0x50, 0xe2, 0xcb, 0x76, 0xd5, 0x69, 0x9f, 0xc2, 0x5c, 0x54, 0xa1, 0xc0, + 0xa8, 0xf3, 0x8e, 0x3f, 0x99, 0xde, 0x71, 0xd1, 0x9d, 0x39, 0x2a, 0x6f, 0x73, 0xe6, 0x48, 0x5d, + 0x0d, 0xb4, 0x08, 0x33, 0x12, 0xc8, 0x01, 0xd8, 0x70, 0x3d, 0x27, 0xd8, 0xa7, 0x65, 0x33, 0x79, + 0x46, 0xf0, 0xd9, 0xde, 0x04, 0x17, 0x15, 0x3e, 0x27, 0xab, 0xfa, 0x1a, 0x03, 0xb0, 0x46, 0x74, + 0xf6, 0x25, 0x28, 0x29, 0xe4, 0xe3, 0xf0, 0x38, 0xb3, 0x9f, 0x85, 0xc9, 0x44, 0x5b, 0xfd, 0xaa, + 0x8f, 0xe9, 0x2c, 0xd2, 
0x2f, 0xb3, 0x53, 0x40, 0xf4, 0x7a, 0xd9, 0xdb, 0x13, 0xa7, 0xe8, 0xfb, + 0x70, 0xb6, 0x95, 0x72, 0x38, 0x89, 0xa9, 0x1a, 0xfc, 0x30, 0xbb, 0x20, 0x3e, 0xfb, 0x6c, 0x1a, + 0x14, 0xa7, 0xb6, 0x41, 0xaf, 0x7d, 0xbf, 0x4d, 0xd7, 0xbc, 0xd3, 0xd2, 0x39, 0xe8, 0xdb, 0xa2, + 0x0c, 0x2b, 0x28, 0x3d, 0xc2, 0xce, 0xaa, 0xce, 0xdf, 0x24, 0xfb, 0x75, 0xd2, 0x22, 0x8d, 0xc8, + 0x0f, 0x3e, 0xd4, 0xee, 0x5f, 0xe4, 0xa3, 0xcf, 0x4f, 0xc0, 0x51, 0x41, 0x20, 0x7f, 0x93, 0xec, + 0xf3, 0xa9, 0xd0, 0xbf, 0x2e, 0xdf, 0xf3, 0xeb, 0x7e, 0xd6, 0x82, 0x71, 0xf5, 0x75, 0xa7, 0xb0, + 0xd5, 0x17, 0xcd, 0xad, 0x7e, 0xb1, 0xe7, 0x02, 0xcf, 0xd8, 0xe4, 0x5f, 0xcb, 0xc1, 0x79, 0x85, + 0x43, 0xd9, 0x7d, 0xfe, 0x47, 0xac, 0xaa, 0x79, 0x28, 0x79, 0x4a, 0x0b, 0x64, 0x99, 0xea, 0x97, + 0x58, 0x07, 0x14, 0xe3, 0x50, 0xae, 0xcd, 0x8b, 0x55, 0x35, 0x63, 0xba, 0x7a, 0x54, 0xa8, 0x42, + 0x17, 0x21, 0xdf, 0x71, 0x9b, 0xe2, 0xce, 0xf8, 0x94, 0x1c, 0xed, 0x3b, 0xd5, 0xca, 0xd1, 0x41, + 0xf9, 0xf1, 0x2c, 0xd5, 0x3c, 0xbd, 0xac, 0xc2, 0xb9, 0x3b, 0xd5, 0x0a, 0xa6, 0x95, 0xd1, 0x02, + 0x4c, 0xca, 0xd7, 0x87, 0xbb, 0x94, 0x83, 0xf2, 0x3d, 0x71, 0xb5, 0x28, 0x1d, 0x27, 0x36, 0xc1, + 0x38, 0x89, 0x8f, 0x2a, 0x30, 0xb5, 0xd3, 0xd9, 0x20, 0x2d, 0x12, 0xf1, 0x0f, 0xbe, 0x49, 0xb8, + 0x06, 0xb0, 0x14, 0x0b, 0x5b, 0x37, 0x13, 0x70, 0xdc, 0x55, 0xc3, 0xfe, 0x33, 0x76, 0xc4, 0x8b, + 0xd1, 0xab, 0x05, 0x3e, 0x5d, 0x58, 0x94, 0xfa, 0x87, 0xb9, 0x9c, 0x07, 0x59, 0x15, 0x37, 0xc9, + 0xfe, 0xba, 0x4f, 0x99, 0xed, 0xf4, 0x55, 0x61, 0xac, 0xf9, 0x42, 0xcf, 0x35, 0xff, 0xf3, 0x39, + 0x38, 0xa7, 0x46, 0xc0, 0xe0, 0xeb, 0xfe, 0xbc, 0x8f, 0xc1, 0x73, 0x30, 0xda, 0x24, 0x9b, 0x4e, + 0xa7, 0x15, 0x29, 0x75, 0xf4, 0x10, 0x7f, 0x92, 0xa8, 0xc4, 0xc5, 0x58, 0xc7, 0x39, 0xc6, 0xb0, + 0xfd, 0xc4, 0x28, 0xbb, 0x5b, 0x23, 0x87, 0xae, 0x71, 0xb5, 0x6b, 0xac, 0xcc, 0x5d, 0xf3, 0x04, + 0x0c, 0xb9, 0xbb, 0x94, 0xd7, 0xca, 0x99, 0x2c, 0x54, 0x95, 0x16, 0x62, 0x0e, 0x43, 0x9f, 0x80, + 0x91, 0x86, 0xbf, 0xbb, 0xeb, 0x78, 0x4d, 0x76, 0xe5, 0x95, 0x16, 0x47, 0x29, 0x3b, 0xb6, 0xc4, + 0x8b, 0xb0, 0x84, 0xa1, 0x0b, 0x50, 0x70, 0x82, 0x2d, 0xae, 0x96, 0x28, 0x2d, 0x16, 0x69, 0x4b, + 0x0b, 0xc1, 0x56, 0x88, 0x59, 0x29, 0x95, 0xaa, 0xee, 0xf9, 0xc1, 0x8e, 0xeb, 0x6d, 0x55, 0xdc, + 0x40, 0x6c, 0x09, 0x75, 0x17, 0xbe, 0xa9, 0x20, 0x58, 0xc3, 0x42, 0x2b, 0x30, 0xd4, 0xf6, 0x83, + 0x28, 0x9c, 0x19, 0x66, 0xc3, 0xfd, 0x78, 0xc6, 0x41, 0xc4, 0xbf, 0xb6, 0xe6, 0x07, 0x51, 0xfc, + 0x01, 0xf4, 0x5f, 0x88, 0x79, 0x75, 0xf4, 0x6d, 0x90, 0x27, 0xde, 0xde, 0xcc, 0x08, 0xa3, 0x32, + 0x9b, 0x46, 0x65, 0xd9, 0xdb, 0xbb, 0xeb, 0x04, 0xf1, 0x29, 0xbd, 0xec, 0xed, 0x61, 0x5a, 0x07, + 0x7d, 0x1e, 0x4a, 0x72, 0x8b, 0x87, 0x42, 0x5d, 0x95, 0xba, 0xc4, 0xe4, 0xc1, 0x80, 0xc9, 0x7b, + 0x1d, 0x37, 0x20, 0xbb, 0xc4, 0x8b, 0xc2, 0xf8, 0x4c, 0x93, 0xd0, 0x10, 0xc7, 0xd4, 0xd0, 0xe7, + 0xa5, 0x8e, 0x74, 0xd5, 0xef, 0x78, 0x51, 0x38, 0x53, 0x62, 0xdd, 0x4b, 0x7d, 0xbd, 0xba, 0x1b, + 0xe3, 0x25, 0x95, 0xa8, 0xbc, 0x32, 0x36, 0x48, 0x21, 0x0c, 0xe3, 0x2d, 0x77, 0x8f, 0x78, 0x24, + 0x0c, 0x6b, 0x81, 0xbf, 0x41, 0x66, 0x80, 0xf5, 0xfc, 0x7c, 0xfa, 0xa3, 0x8e, 0xbf, 0x41, 0x16, + 0xa7, 0x0f, 0x0f, 0xca, 0xe3, 0xb7, 0xf4, 0x3a, 0xd8, 0x24, 0x81, 0xee, 0xc0, 0x04, 0x95, 0x6b, + 0xdc, 0x98, 0xe8, 0x68, 0x3f, 0xa2, 0x4c, 0xfa, 0xc0, 0x46, 0x25, 0x9c, 0x20, 0x82, 0x5e, 0x87, + 0x52, 0xcb, 0xdd, 0x24, 0x8d, 0xfd, 0x46, 0x8b, 0xcc, 0x8c, 0x31, 0x8a, 0xa9, 0xdb, 0xea, 0x96, + 0x44, 0xe2, 0x72, 0x91, 0xfa, 0x8b, 0xe3, 0xea, 0xe8, 0x2e, 0x3c, 0x12, 0x91, 0x60, 0xd7, 0xf5, + 0x1c, 0xba, 0x1d, 0x84, 0xbc, 0xc0, 0x9e, 0xc6, 
0xc6, 0xd9, 0x7a, 0xbb, 0x24, 0x86, 0xee, 0x91, + 0xf5, 0x54, 0x2c, 0x9c, 0x51, 0x1b, 0xdd, 0x86, 0x49, 0xb6, 0x13, 0x6a, 0x9d, 0x56, 0xab, 0xe6, + 0xb7, 0xdc, 0xc6, 0xfe, 0xcc, 0x04, 0x23, 0xf8, 0x09, 0x79, 0x2f, 0x54, 0x4d, 0xf0, 0xd1, 0x41, + 0x19, 0xe2, 0x7f, 0x38, 0x59, 0x1b, 0x6d, 0xb0, 0xb7, 0x90, 0x4e, 0xe0, 0x46, 0xfb, 0x74, 0xfd, + 0x92, 0xfb, 0xd1, 0xcc, 0x64, 0x4f, 0x51, 0x58, 0x47, 0x55, 0x0f, 0x26, 0x7a, 0x21, 0x4e, 0x12, + 0xa4, 0x5b, 0x3b, 0x8c, 0x9a, 0xae, 0x37, 0x33, 0xc5, 0x4e, 0x0c, 0xb5, 0x33, 0xea, 0xb4, 0x10, + 0x73, 0x18, 0x7b, 0x07, 0xa1, 0x3f, 0x6e, 0xd3, 0x13, 0x74, 0x9a, 0x21, 0xc6, 0xef, 0x20, 0x12, + 0x80, 0x63, 0x1c, 0xca, 0xd4, 0x44, 0xd1, 0xfe, 0x0c, 0x62, 0xa8, 0x6a, 0xbb, 0xac, 0xaf, 0x7f, + 0x1e, 0xd3, 0x72, 0x74, 0x0b, 0x46, 0x88, 0xb7, 0xb7, 0x12, 0xf8, 0xbb, 0x33, 0x67, 0xb2, 0xf7, + 0xec, 0x32, 0x47, 0xe1, 0x07, 0x7a, 0x2c, 0xe0, 0x89, 0x62, 0x2c, 0x49, 0xa0, 0xfb, 0x30, 0x93, + 0x32, 0x23, 0x7c, 0x02, 0xce, 0xb2, 0x09, 0x78, 0x55, 0xd4, 0x9d, 0x59, 0xcf, 0xc0, 0x3b, 0xea, + 0x01, 0xc3, 0x99, 0xd4, 0xd1, 0x17, 0x60, 0x9c, 0x6f, 0x28, 0xfe, 0x88, 0x1a, 0xce, 0x9c, 0x63, + 0x5f, 0x73, 0x39, 0x7b, 0x73, 0x72, 0xc4, 0xc5, 0x73, 0xa2, 0x43, 0xe3, 0x7a, 0x69, 0x88, 0x4d, + 0x6a, 0xf6, 0x06, 0x4c, 0xa8, 0x73, 0x8b, 0x2d, 0x1d, 0x54, 0x86, 0x21, 0xc6, 0xed, 0x08, 0xfd, + 0x56, 0x89, 0xce, 0x14, 0xe3, 0x84, 0x30, 0x2f, 0x67, 0x33, 0xe5, 0xbe, 0x4f, 0x16, 0xf7, 0x23, + 0xc2, 0xa5, 0xea, 0xbc, 0x36, 0x53, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0xff, 0x72, 0xae, 0x31, 0x3e, + 0x1c, 0x07, 0xb8, 0x0e, 0x9e, 0x81, 0xe2, 0xb6, 0x1f, 0x46, 0x14, 0x9b, 0xb5, 0x31, 0x14, 0xf3, + 0x89, 0x37, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x15, 0x18, 0x6f, 0xe8, 0x0d, 0x88, 0xbb, 0x4c, 0x0d, + 0x81, 0xd1, 0x3a, 0x36, 0x71, 0xd1, 0xcb, 0x50, 0x64, 0x26, 0x10, 0x0d, 0xbf, 0x25, 0x98, 0x2c, + 0x79, 0x21, 0x17, 0x6b, 0xa2, 0xfc, 0x48, 0xfb, 0x8d, 0x15, 0x36, 0xba, 0x02, 0xc3, 0xb4, 0x0b, + 0xd5, 0x9a, 0xb8, 0x45, 0x94, 0xaa, 0xe6, 0x06, 0x2b, 0xc5, 0x02, 0x6a, 0xff, 0xb5, 0x9c, 0x36, + 0xca, 0x54, 0x22, 0x25, 0xa8, 0x06, 0x23, 0xf7, 0x1c, 0x37, 0x72, 0xbd, 0x2d, 0xc1, 0x2e, 0x3c, + 0xd5, 0xf3, 0x4a, 0x61, 0x95, 0xde, 0xe4, 0x15, 0xf8, 0xa5, 0x27, 0xfe, 0x60, 0x49, 0x86, 0x52, + 0x0c, 0x3a, 0x9e, 0x47, 0x29, 0xe6, 0x06, 0xa5, 0x88, 0x79, 0x05, 0x4e, 0x51, 0xfc, 0xc1, 0x92, + 0x0c, 0x7a, 0x1b, 0x40, 0x2e, 0x4b, 0xd2, 0x14, 0xa6, 0x07, 0xcf, 0xf4, 0x27, 0xba, 0xae, 0xea, + 0x2c, 0x4e, 0xd0, 0x2b, 0x35, 0xfe, 0x8f, 0x35, 0x7a, 0x76, 0xc4, 0xd8, 0xaa, 0xee, 0xce, 0xa0, + 0xef, 0xa4, 0x27, 0x81, 0x13, 0x44, 0xa4, 0xb9, 0x10, 0x89, 0xc1, 0xf9, 0xe4, 0x60, 0x32, 0xc5, + 0xba, 0xbb, 0x4b, 0xf4, 0x53, 0x43, 0x10, 0xc1, 0x31, 0x3d, 0xfb, 0x17, 0xf3, 0x30, 0x93, 0xd5, + 0x5d, 0xba, 0xe8, 0xc8, 0x7d, 0x37, 0x5a, 0xa2, 0xdc, 0x90, 0x65, 0x2e, 0xba, 0x65, 0x51, 0x8e, + 0x15, 0x06, 0x9d, 0xfd, 0xd0, 0xdd, 0x92, 0x22, 0xe1, 0x50, 0x3c, 0xfb, 0x75, 0x56, 0x8a, 0x05, + 0x94, 0xe2, 0x05, 0xc4, 0x09, 0x85, 0x6d, 0x8b, 0xb6, 0x4a, 0x30, 0x2b, 0xc5, 0x02, 0xaa, 0xeb, + 0x9b, 0x0a, 0x7d, 0xf4, 0x4d, 0xc6, 0x10, 0x0d, 0x9d, 0xec, 0x10, 0xa1, 0x2f, 0x02, 0x6c, 0xba, + 0x9e, 0x1b, 0x6e, 0x33, 0xea, 0xc3, 0xc7, 0xa6, 0xae, 0x78, 0xa9, 0x15, 0x45, 0x05, 0x6b, 0x14, + 0xd1, 0x8b, 0x30, 0xaa, 0x36, 0x60, 0xb5, 0xc2, 0x1e, 0xfa, 0x34, 0xc3, 0x89, 0xf8, 0x34, 0xaa, + 0x60, 0x1d, 0xcf, 0x7e, 0x37, 0xb9, 0x5e, 0xc4, 0x0e, 0xd0, 0xc6, 0xd7, 0x1a, 0x74, 0x7c, 0x73, + 0xbd, 0xc7, 0xd7, 0xfe, 0xf5, 0x3c, 0x4c, 0x1a, 0x8d, 0x75, 0xc2, 0x01, 0xce, 0xac, 0xeb, 0xf4, + 0x9e, 0x73, 0x22, 0x22, 0xf6, 0x9f, 0xdd, 0x7f, 0xab, 0xe8, 0x77, 0x21, 
0xdd, 0x01, 0xbc, 0x3e, + 0xfa, 0x22, 0x94, 0x5a, 0x4e, 0xc8, 0x74, 0x57, 0x44, 0xec, 0xbb, 0x41, 0x88, 0xc5, 0x72, 0x84, + 0x13, 0x46, 0xda, 0x55, 0xc3, 0x69, 0xc7, 0x24, 0xe9, 0x85, 0x4c, 0x79, 0x1f, 0x69, 0x3c, 0xa5, + 0x3a, 0x41, 0x19, 0xa4, 0x7d, 0xcc, 0x61, 0xe8, 0x65, 0x18, 0x0b, 0x08, 0x5b, 0x15, 0x4b, 0x94, + 0x95, 0x63, 0xcb, 0x6c, 0x28, 0xe6, 0xf9, 0xb0, 0x06, 0xc3, 0x06, 0x66, 0xcc, 0xca, 0x0f, 0xf7, + 0x60, 0xe5, 0x9f, 0x82, 0x11, 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x55, 0x5e, 0x8c, 0x25, 0x3c, + 0xb9, 0x60, 0x8a, 0x03, 0x2e, 0x98, 0x4f, 0xc2, 0x44, 0xc5, 0x21, 0xbb, 0xbe, 0xb7, 0xec, 0x35, + 0xdb, 0xbe, 0xeb, 0x45, 0x68, 0x06, 0x0a, 0xec, 0x76, 0xe0, 0x7b, 0xbb, 0x40, 0x29, 0xe0, 0x02, + 0x65, 0xcc, 0xed, 0x2d, 0x38, 0x57, 0xf1, 0xef, 0x79, 0xf7, 0x9c, 0xa0, 0xb9, 0x50, 0xab, 0x6a, + 0x72, 0xee, 0x9a, 0x94, 0xb3, 0xb8, 0x31, 0x52, 0xea, 0x99, 0xaa, 0xd5, 0xe4, 0x77, 0xed, 0x8a, + 0xdb, 0x22, 0x19, 0xda, 0x88, 0xbf, 0x99, 0x33, 0x5a, 0x8a, 0xf1, 0xd5, 0x83, 0x91, 0x95, 0xf9, + 0x60, 0xf4, 0x06, 0x14, 0x37, 0x5d, 0xd2, 0x6a, 0x62, 0xb2, 0x29, 0x96, 0xd8, 0x93, 0xd9, 0xf6, + 0x15, 0x2b, 0x14, 0x53, 0x6a, 0x9f, 0xb8, 0x94, 0xb6, 0x22, 0x2a, 0x63, 0x45, 0x06, 0xed, 0xc0, + 0x94, 0x14, 0x03, 0x24, 0x54, 0x2c, 0xb8, 0xa7, 0x7a, 0xc9, 0x16, 0x26, 0xf1, 0xb3, 0x87, 0x07, + 0xe5, 0x29, 0x9c, 0x20, 0x83, 0xbb, 0x08, 0x53, 0xb1, 0x6c, 0x97, 0x1e, 0xad, 0x05, 0x36, 0xfc, + 0x4c, 0x2c, 0x63, 0x12, 0x26, 0x2b, 0xb5, 0x7f, 0xd4, 0x82, 0x47, 0xbb, 0x46, 0x46, 0x48, 0xda, + 0x27, 0x3c, 0x0b, 0x49, 0xc9, 0x37, 0xd7, 0x5f, 0xf2, 0xb5, 0xff, 0x81, 0x05, 0x67, 0x97, 0x77, + 0xdb, 0xd1, 0x7e, 0xc5, 0x35, 0x5f, 0x77, 0x5e, 0x82, 0xe1, 0x5d, 0xd2, 0x74, 0x3b, 0xbb, 0x62, + 0xe6, 0xca, 0xf2, 0xf8, 0x59, 0x65, 0xa5, 0x47, 0x07, 0xe5, 0xf1, 0x7a, 0xe4, 0x07, 0xce, 0x16, + 0xe1, 0x05, 0x58, 0xa0, 0xb3, 0x43, 0xdc, 0x7d, 0x9f, 0xdc, 0x72, 0x77, 0x5d, 0x69, 0x2f, 0xd3, + 0x53, 0x77, 0x36, 0x27, 0x07, 0x74, 0xee, 0x8d, 0x8e, 0xe3, 0x45, 0x6e, 0xb4, 0x2f, 0x1e, 0x66, + 0x24, 0x11, 0x1c, 0xd3, 0xb3, 0xbf, 0x61, 0xc1, 0xa4, 0x5c, 0xf7, 0x0b, 0xcd, 0x66, 0x40, 0xc2, + 0x10, 0xcd, 0x42, 0xce, 0x6d, 0x8b, 0x5e, 0x82, 0xe8, 0x65, 0xae, 0x5a, 0xc3, 0x39, 0xb7, 0x8d, + 0x6a, 0x50, 0xe2, 0x66, 0x37, 0xf1, 0xe2, 0x1a, 0xc8, 0x78, 0x87, 0xf5, 0x60, 0x5d, 0xd6, 0xc4, + 0x31, 0x11, 0xc9, 0xc1, 0xb1, 0x33, 0x33, 0x6f, 0xbe, 0x7a, 0xdd, 0x10, 0xe5, 0x58, 0x61, 0xa0, + 0xab, 0x50, 0xf4, 0xfc, 0x26, 0xb7, 0x82, 0xe2, 0xb7, 0x1f, 0x5b, 0xb2, 0x6b, 0xa2, 0x0c, 0x2b, + 0xa8, 0xfd, 0x43, 0x16, 0x8c, 0xc9, 0x2f, 0x1b, 0x90, 0x99, 0xa4, 0x5b, 0x2b, 0x66, 0x24, 0xe3, + 0xad, 0x45, 0x99, 0x41, 0x06, 0x31, 0x78, 0xc0, 0xfc, 0x71, 0x78, 0x40, 0xfb, 0x47, 0x72, 0x30, + 0x21, 0xbb, 0x53, 0xef, 0x6c, 0x84, 0x24, 0x42, 0xeb, 0x50, 0x72, 0xf8, 0x90, 0x13, 0xb9, 0x62, + 0x9f, 0x48, 0x17, 0x3e, 0x8c, 0xf9, 0x89, 0xaf, 0xe5, 0x05, 0x59, 0x1b, 0xc7, 0x84, 0x50, 0x0b, + 0xa6, 0x3d, 0x3f, 0x62, 0x47, 0xb4, 0x82, 0xf7, 0x7a, 0x02, 0x49, 0x52, 0x3f, 0x2f, 0xa8, 0x4f, + 0xaf, 0x25, 0xa9, 0xe0, 0x6e, 0xc2, 0x68, 0x59, 0x2a, 0x3c, 0xf2, 0xd9, 0xe2, 0x86, 0x3e, 0x0b, + 0xe9, 0xfa, 0x0e, 0xfb, 0x57, 0x2c, 0x28, 0x49, 0xb4, 0xd3, 0x78, 0xed, 0x5a, 0x85, 0x91, 0x90, + 0x4d, 0x82, 0x1c, 0x1a, 0xbb, 0x57, 0xc7, 0xf9, 0x7c, 0xc5, 0x37, 0x0f, 0xff, 0x1f, 0x62, 0x49, + 0x83, 0xe9, 0xbb, 0x55, 0xf7, 0x3f, 0x22, 0xfa, 0x6e, 0xd5, 0x9f, 0x8c, 0x1b, 0xe6, 0xbf, 0xb1, + 0x3e, 0x6b, 0x62, 0x2d, 0x65, 0x90, 0xda, 0x01, 0xd9, 0x74, 0xef, 0x27, 0x19, 0xa4, 0x1a, 0x2b, + 0xc5, 0x02, 0x8a, 0xde, 0x86, 0xb1, 0x86, 0x54, 0x74, 0xc6, 0xc7, 0xc0, 0x95, 0x9e, 0x4a, 0x77, + 
0xf5, 0x3e, 0xc3, 0x2d, 0xa4, 0x97, 0xb4, 0xfa, 0xd8, 0xa0, 0x66, 0x3e, 0xb7, 0xe7, 0xfb, 0x3d, + 0xb7, 0xc7, 0x74, 0xb3, 0x1f, 0x9f, 0x7f, 0xcc, 0x82, 0x61, 0xae, 0x2e, 0x1b, 0x4c, 0xbf, 0xa8, + 0x3d, 0x57, 0xc5, 0x63, 0x77, 0x97, 0x16, 0x8a, 0xe7, 0x27, 0xb4, 0x0a, 0x25, 0xf6, 0x83, 0xa9, + 0x0d, 0xf2, 0xd9, 0xa6, 0xe1, 0xbc, 0x55, 0xbd, 0x83, 0x77, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0xbf, + 0x9a, 0xa7, 0x47, 0x55, 0x8c, 0x6a, 0xdc, 0xe0, 0xd6, 0xc3, 0xbb, 0xc1, 0x73, 0x0f, 0xeb, 0x06, + 0xdf, 0x82, 0xc9, 0x86, 0xf6, 0xb8, 0x15, 0xcf, 0xe4, 0xd5, 0x9e, 0x8b, 0x44, 0x7b, 0x07, 0xe3, + 0x2a, 0xa3, 0x25, 0x93, 0x08, 0x4e, 0x52, 0x45, 0xdf, 0x09, 0x63, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, + 0x62, 0xe1, 0x13, 0xd9, 0xeb, 0x45, 0x6f, 0x82, 0xad, 0xc4, 0xba, 0x56, 0x1d, 0x1b, 0xc4, 0xec, + 0x5f, 0x2c, 0xc2, 0xd0, 0xf2, 0x1e, 0xf1, 0xa2, 0x53, 0x38, 0x90, 0x1a, 0x30, 0xe1, 0x7a, 0x7b, + 0x7e, 0x6b, 0x8f, 0x34, 0x39, 0xfc, 0x38, 0x97, 0xeb, 0x23, 0x82, 0xf4, 0x44, 0xd5, 0x20, 0x81, + 0x13, 0x24, 0x1f, 0x86, 0x84, 0x79, 0x1d, 0x86, 0xf9, 0xdc, 0x0b, 0xf1, 0x32, 0x55, 0x19, 0xcc, + 0x06, 0x51, 0xec, 0x82, 0x58, 0xfa, 0xe5, 0xda, 0x67, 0x51, 0x1d, 0xbd, 0x0b, 0x13, 0x9b, 0x6e, + 0x10, 0x46, 0x54, 0x34, 0x0c, 0x23, 0x67, 0xb7, 0xfd, 0x00, 0x12, 0xa5, 0x1a, 0x87, 0x15, 0x83, + 0x12, 0x4e, 0x50, 0x46, 0x5b, 0x30, 0x4e, 0x85, 0x9c, 0xb8, 0xa9, 0x91, 0x63, 0x37, 0xa5, 0x54, + 0x46, 0xb7, 0x74, 0x42, 0xd8, 0xa4, 0x4b, 0x0f, 0x93, 0x06, 0x13, 0x8a, 0x8a, 0x8c, 0xa3, 0x50, + 0x87, 0x09, 0x97, 0x86, 0x38, 0x8c, 0x9e, 0x49, 0xcc, 0x6c, 0xa5, 0x64, 0x9e, 0x49, 0x9a, 0x71, + 0xca, 0x3b, 0x50, 0x22, 0x74, 0x08, 0x29, 0x61, 0xa1, 0x18, 0x9f, 0x1f, 0xac, 0xaf, 0xab, 0x6e, + 0x23, 0xf0, 0x4d, 0x59, 0x7e, 0x59, 0x52, 0xc2, 0x31, 0x51, 0xb4, 0x04, 0xc3, 0x21, 0x09, 0x5c, + 0x12, 0x0a, 0x15, 0x79, 0x8f, 0x69, 0x64, 0x68, 0xdc, 0xe2, 0x93, 0xff, 0xc6, 0xa2, 0x2a, 0x5d, + 0x5e, 0x0e, 0x93, 0x86, 0x98, 0x56, 0x5c, 0x5b, 0x5e, 0x0b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1d, + 0x46, 0x02, 0xd2, 0x62, 0xca, 0xa2, 0xf1, 0xc1, 0x17, 0x39, 0xd7, 0x3d, 0xf1, 0x7a, 0x58, 0x12, + 0x40, 0x37, 0x01, 0x05, 0x84, 0xf2, 0x10, 0xae, 0xb7, 0xa5, 0x8c, 0x39, 0x84, 0xae, 0xfb, 0x31, + 0xd1, 0xfe, 0x19, 0x1c, 0x63, 0x48, 0xeb, 0x62, 0x9c, 0x52, 0x0d, 0x5d, 0x87, 0x69, 0x55, 0x5a, + 0xf5, 0xc2, 0xc8, 0xf1, 0x1a, 0x84, 0xa9, 0xb9, 0x4b, 0x31, 0x57, 0x84, 0x93, 0x08, 0xb8, 0xbb, + 0x8e, 0xfd, 0xd3, 0x94, 0x9d, 0xa1, 0xa3, 0x75, 0x0a, 0xbc, 0xc0, 0x6b, 0x26, 0x2f, 0x70, 0x3e, + 0x73, 0xe6, 0x32, 0xf8, 0x80, 0x43, 0x0b, 0x46, 0xb5, 0x99, 0x8d, 0xd7, 0xac, 0xd5, 0x63, 0xcd, + 0x76, 0x60, 0x8a, 0xae, 0xf4, 0xdb, 0x1b, 0x21, 0x09, 0xf6, 0x48, 0x93, 0x2d, 0xcc, 0xdc, 0x83, + 0x2d, 0x4c, 0xf5, 0xca, 0x7c, 0x2b, 0x41, 0x10, 0x77, 0x35, 0x81, 0x5e, 0x92, 0x9a, 0x93, 0xbc, + 0x61, 0xa4, 0xc5, 0xb5, 0x22, 0x47, 0x07, 0xe5, 0x29, 0xed, 0x43, 0x74, 0x4d, 0x89, 0xfd, 0x8e, + 0xfc, 0x46, 0xf5, 0x9a, 0xdf, 0x50, 0x8b, 0x25, 0xf1, 0x9a, 0xaf, 0x96, 0x03, 0x8e, 0x71, 0xe8, + 0x1e, 0xa5, 0x22, 0x48, 0xf2, 0x35, 0x9f, 0x0a, 0x28, 0x98, 0x41, 0xec, 0xe7, 0x01, 0x96, 0xef, + 0x93, 0x06, 0x5f, 0xea, 0xfa, 0x03, 0xa4, 0x95, 0xfd, 0x00, 0x69, 0xff, 0x07, 0x0b, 0x26, 0x56, + 0x96, 0x0c, 0x31, 0x71, 0x0e, 0x80, 0xcb, 0x46, 0x6f, 0xbe, 0xb9, 0x26, 0x75, 0xeb, 0x5c, 0x3d, + 0xaa, 0x4a, 0xb1, 0x86, 0x81, 0xce, 0x43, 0xbe, 0xd5, 0xf1, 0x84, 0xc8, 0x32, 0x72, 0x78, 0x50, + 0xce, 0xdf, 0xea, 0x78, 0x98, 0x96, 0x69, 0x16, 0x82, 0xf9, 0x81, 0x2d, 0x04, 0xfb, 0xba, 0xc9, + 0xa1, 0x32, 0x0c, 0xdd, 0xbb, 0xe7, 0x36, 0xb9, 0x33, 0x82, 0xd0, 0xfb, 0xbf, 0xf9, 0x66, 0xb5, + 0x12, 0x62, 0x5e, 0x6e, 
0x7f, 0x25, 0x0f, 0xb3, 0x2b, 0x2d, 0x72, 0xff, 0x03, 0x3a, 0x64, 0x0c, + 0x6a, 0xdf, 0x78, 0x3c, 0x7e, 0xf1, 0xb8, 0x36, 0xac, 0xfd, 0xc7, 0x63, 0x13, 0x46, 0xf8, 0x63, + 0xb6, 0x74, 0xcf, 0x78, 0x25, 0xad, 0xf5, 0xec, 0x01, 0x99, 0xe3, 0x8f, 0xe2, 0xc2, 0xc0, 0x5d, + 0xdd, 0xb4, 0xa2, 0x14, 0x4b, 0xe2, 0xb3, 0x9f, 0x81, 0x31, 0x1d, 0xf3, 0x58, 0xd6, 0xe4, 0x7f, + 0x31, 0x0f, 0x53, 0xb4, 0x07, 0x0f, 0x75, 0x22, 0xee, 0x74, 0x4f, 0xc4, 0x49, 0x5b, 0x14, 0xf7, + 0x9f, 0x8d, 0xb7, 0x93, 0xb3, 0xf1, 0x5c, 0xd6, 0x6c, 0x9c, 0xf6, 0x1c, 0x7c, 0xaf, 0x05, 0x67, + 0x56, 0x5a, 0x7e, 0x63, 0x27, 0x61, 0xf5, 0xfb, 0x22, 0x8c, 0xd2, 0x73, 0x3c, 0x34, 0xbc, 0xc1, + 0x0c, 0xff, 0x40, 0x01, 0xc2, 0x3a, 0x9e, 0x56, 0xed, 0xce, 0x9d, 0x6a, 0x25, 0xcd, 0xad, 0x50, + 0x80, 0xb0, 0x8e, 0x67, 0x7f, 0xdd, 0x82, 0x8b, 0xd7, 0x97, 0x96, 0xe3, 0xa5, 0xd8, 0xe5, 0xd9, + 0x48, 0xa5, 0xc0, 0xa6, 0xd6, 0x95, 0x58, 0x0a, 0xac, 0xb0, 0x5e, 0x08, 0xe8, 0x47, 0xc5, 0x6b, + 0xf7, 0xa7, 0x2c, 0x38, 0x73, 0xdd, 0x8d, 0xe8, 0xb5, 0x9c, 0xf4, 0xb1, 0xa3, 0xf7, 0x72, 0xe8, + 0x46, 0x7e, 0xb0, 0x9f, 0xf4, 0xb1, 0xc3, 0x0a, 0x82, 0x35, 0x2c, 0xde, 0xf2, 0x9e, 0xcb, 0xcc, + 0xa8, 0x72, 0xa6, 0x2a, 0x0a, 0x8b, 0x72, 0xac, 0x30, 0xe8, 0x87, 0x35, 0xdd, 0x80, 0x89, 0x12, + 0xfb, 0xe2, 0x84, 0x55, 0x1f, 0x56, 0x91, 0x00, 0x1c, 0xe3, 0xd8, 0x7f, 0x68, 0x41, 0xf9, 0x7a, + 0xab, 0x13, 0x46, 0x24, 0xd8, 0x0c, 0x33, 0x4e, 0xc7, 0xe7, 0xa1, 0x44, 0xa4, 0xe0, 0x2e, 0x7a, + 0xad, 0x58, 0x4d, 0x25, 0xd1, 0x73, 0x57, 0x3f, 0x85, 0x37, 0x80, 0x0f, 0xc1, 0xf1, 0x8c, 0xc0, + 0x57, 0x00, 0x11, 0xbd, 0x2d, 0xdd, 0xf7, 0x91, 0x39, 0x51, 0x2d, 0x77, 0x41, 0x71, 0x4a, 0x0d, + 0xfb, 0x47, 0x2d, 0x38, 0xa7, 0x3e, 0xf8, 0x23, 0xf7, 0x99, 0xf6, 0xcf, 0xe5, 0x60, 0xfc, 0xc6, + 0xfa, 0x7a, 0xed, 0x3a, 0x89, 0xc4, 0xb5, 0xdd, 0x5f, 0xb7, 0x8e, 0x35, 0x15, 0x61, 0x2f, 0x29, + 0xb0, 0x13, 0xb9, 0xad, 0x39, 0xee, 0x42, 0x3f, 0x57, 0xf5, 0xa2, 0xdb, 0x41, 0x3d, 0x0a, 0x5c, + 0x6f, 0x2b, 0x55, 0xa9, 0x28, 0x99, 0x8b, 0x7c, 0x16, 0x73, 0x81, 0x9e, 0x87, 0x61, 0xe6, 0xc3, + 0x2f, 0x27, 0xe1, 0x31, 0x25, 0x44, 0xb1, 0xd2, 0xa3, 0x83, 0x72, 0xe9, 0x0e, 0xae, 0xf2, 0x3f, + 0x58, 0xa0, 0xa2, 0x3b, 0x30, 0xba, 0x1d, 0x45, 0xed, 0x1b, 0xc4, 0x69, 0x92, 0x40, 0x1e, 0x87, + 0x97, 0xd2, 0x8e, 0x43, 0x3a, 0x08, 0x1c, 0x2d, 0x3e, 0x41, 0xe2, 0xb2, 0x10, 0xeb, 0x74, 0xec, + 0x3a, 0x40, 0x0c, 0x3b, 0x21, 0x85, 0x8a, 0xfd, 0xfb, 0x16, 0x8c, 0x70, 0x77, 0xca, 0x00, 0xbd, + 0x0a, 0x05, 0x72, 0x9f, 0x34, 0x04, 0xab, 0x9c, 0xda, 0xe1, 0x98, 0xd3, 0xe2, 0xcf, 0x03, 0xf4, + 0x3f, 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa1, 0xbd, 0xbd, 0xae, 0x7c, 0x4b, 0x1f, 0xcf, 0xfa, 0x62, + 0x35, 0xed, 0x9c, 0x39, 0x13, 0x45, 0x58, 0x56, 0x67, 0xaa, 0xee, 0x46, 0xbb, 0x4e, 0x4f, 0xec, + 0xa8, 0x17, 0x63, 0xb1, 0xbe, 0x54, 0xe3, 0x48, 0x82, 0x1a, 0x57, 0x75, 0xcb, 0x42, 0x1c, 0x13, + 0xb1, 0xd7, 0xa1, 0x44, 0x27, 0x75, 0xa1, 0xe5, 0x3a, 0xbd, 0xb5, 0xec, 0x4f, 0x43, 0x49, 0x6a, + 0xbc, 0x43, 0xe1, 0xc9, 0xc5, 0xa8, 0x4a, 0x85, 0x78, 0x88, 0x63, 0xb8, 0xbd, 0x09, 0x67, 0x99, + 0xa9, 0x83, 0x13, 0x6d, 0x1b, 0x7b, 0xac, 0xff, 0x62, 0x7e, 0x46, 0x48, 0x9e, 0x7c, 0x66, 0x66, + 0x34, 0x67, 0x89, 0x31, 0x49, 0x31, 0x96, 0x42, 0xed, 0x3f, 0x28, 0xc0, 0x63, 0xd5, 0x7a, 0xb6, + 0xa7, 0xed, 0xcb, 0x30, 0xc6, 0xf9, 0x52, 0xba, 0xb4, 0x9d, 0x96, 0x68, 0x57, 0x3d, 0x04, 0xae, + 0x6b, 0x30, 0x6c, 0x60, 0xa2, 0x8b, 0x90, 0x77, 0xdf, 0xf3, 0x92, 0x76, 0xc7, 0xd5, 0x37, 0xd6, + 0x30, 0x2d, 0xa7, 0x60, 0xca, 0xe2, 0xf2, 0xbb, 0x43, 0x81, 0x15, 0x9b, 0xfb, 0x1a, 0x4c, 0xb8, + 0x61, 0x23, 0x74, 0xab, 0x1e, 0x3d, 0x67, 0xb4, 
0x93, 0x4a, 0x69, 0x45, 0x68, 0xa7, 0x15, 0x14, + 0x27, 0xb0, 0xb5, 0x8b, 0x6c, 0x68, 0x60, 0x36, 0xb9, 0xaf, 0x6b, 0x13, 0x95, 0x00, 0xda, 0xec, + 0xeb, 0x42, 0x66, 0xc5, 0x27, 0x24, 0x00, 0xfe, 0xc1, 0x21, 0x96, 0x30, 0x2a, 0x72, 0x36, 0xb6, + 0x9d, 0xf6, 0x42, 0x27, 0xda, 0xae, 0xb8, 0x61, 0xc3, 0xdf, 0x23, 0xc1, 0x3e, 0xd3, 0x16, 0x14, + 0x63, 0x91, 0x53, 0x01, 0x96, 0x6e, 0x2c, 0xd4, 0x28, 0x26, 0xee, 0xae, 0x63, 0xb2, 0xc1, 0x70, + 0x12, 0x6c, 0xf0, 0x02, 0x4c, 0xca, 0x66, 0xea, 0x24, 0x64, 0x97, 0xe2, 0x28, 0xeb, 0x98, 0xb2, + 0x2d, 0x16, 0xc5, 0xaa, 0x5b, 0x49, 0x7c, 0xf4, 0x12, 0x8c, 0xbb, 0x9e, 0x1b, 0xb9, 0x4e, 0xe4, + 0x07, 0x8c, 0xa5, 0xe0, 0x8a, 0x01, 0x66, 0xba, 0x57, 0xd5, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0x4b, + 0x01, 0xa6, 0xd9, 0xb4, 0x7d, 0x6b, 0x85, 0x7d, 0x64, 0x56, 0xd8, 0x9d, 0xee, 0x15, 0x76, 0x12, + 0xfc, 0xfd, 0x87, 0xb9, 0xcc, 0xde, 0x85, 0x92, 0x32, 0x7e, 0x96, 0xde, 0x0f, 0x56, 0x86, 0xf7, + 0x43, 0x7f, 0xee, 0x43, 0xbe, 0x5b, 0xe7, 0x53, 0xdf, 0xad, 0xff, 0xb6, 0x05, 0xb1, 0x0d, 0x28, + 0xba, 0x01, 0xa5, 0xb6, 0xcf, 0xec, 0x2c, 0x02, 0x69, 0xbc, 0xf4, 0x58, 0xea, 0x45, 0xc5, 0x2f, + 0x45, 0x3e, 0x7e, 0x35, 0x59, 0x03, 0xc7, 0x95, 0xd1, 0x22, 0x8c, 0xb4, 0x03, 0x52, 0x8f, 0x98, + 0xcf, 0x6f, 0x5f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0x3f, 0x6f, 0x01, 0xf0, 0xa7, + 0x61, 0xc7, 0xdb, 0x22, 0xa7, 0xa0, 0xee, 0xae, 0x40, 0x21, 0x6c, 0x93, 0x46, 0x2f, 0x0b, 0x98, + 0xb8, 0x3f, 0xf5, 0x36, 0x69, 0xc4, 0x03, 0x4e, 0xff, 0x61, 0x56, 0xdb, 0xfe, 0x3e, 0x80, 0x89, + 0x18, 0xad, 0x1a, 0x91, 0x5d, 0xf4, 0xac, 0xe1, 0x03, 0x78, 0x3e, 0xe1, 0x03, 0x58, 0x62, 0xd8, + 0x9a, 0x66, 0xf5, 0x5d, 0xc8, 0xef, 0x3a, 0xf7, 0x85, 0xea, 0xec, 0xe9, 0xde, 0xdd, 0xa0, 0xf4, + 0xe7, 0x56, 0x9d, 0xfb, 0x5c, 0x48, 0x7c, 0x5a, 0x2e, 0x90, 0x55, 0xe7, 0xfe, 0x11, 0xb7, 0x73, + 0x61, 0x87, 0xd4, 0x2d, 0x37, 0x8c, 0xbe, 0xf4, 0x9f, 0xe3, 0xff, 0x6c, 0xd9, 0xd1, 0x46, 0x58, + 0x5b, 0xae, 0x27, 0x1e, 0x4a, 0x07, 0x6a, 0xcb, 0xf5, 0x92, 0x6d, 0xb9, 0xde, 0x00, 0x6d, 0xb9, + 0x1e, 0x7a, 0x1f, 0x46, 0x84, 0x51, 0x82, 0xf0, 0xb9, 0x9f, 0x1f, 0xa0, 0x3d, 0x61, 0xd3, 0xc0, + 0xdb, 0x9c, 0x97, 0x42, 0xb0, 0x28, 0xed, 0xdb, 0xae, 0x6c, 0x10, 0xfd, 0x0d, 0x0b, 0x26, 0xc4, + 0x6f, 0x4c, 0xde, 0xeb, 0x90, 0x30, 0x12, 0xbc, 0xe7, 0xa7, 0x07, 0xef, 0x83, 0xa8, 0xc8, 0xbb, + 0xf2, 0x69, 0x79, 0xcc, 0x9a, 0xc0, 0xbe, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x23, 0x0b, 0xce, 0xee, + 0x3a, 0xf7, 0x79, 0x8b, 0xbc, 0x0c, 0x3b, 0x91, 0xeb, 0x0b, 0x63, 0xfd, 0x57, 0x07, 0x9b, 0xfe, + 0xae, 0xea, 0xbc, 0x93, 0xd2, 0xae, 0xf7, 0x6c, 0x1a, 0x4a, 0xdf, 0xae, 0xa6, 0xf6, 0x6b, 0x76, + 0x13, 0x8a, 0x72, 0xbd, 0xa5, 0xa8, 0x1a, 0x2a, 0x3a, 0x63, 0x7d, 0x6c, 0x9b, 0x10, 0xdd, 0x11, + 0x8f, 0xb6, 0x23, 0xd6, 0xda, 0x43, 0x6d, 0xe7, 0x5d, 0x18, 0xd3, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, + 0x3d, 0x38, 0x93, 0xb2, 0x96, 0x1e, 0x6a, 0x93, 0xf7, 0xe0, 0x7c, 0xe6, 0xfa, 0x78, 0x98, 0x0d, + 0xdb, 0x3f, 0x67, 0xe9, 0xe7, 0xe0, 0x29, 0xbc, 0x39, 0x2c, 0x99, 0x6f, 0x0e, 0x97, 0x7a, 0xef, + 0x9c, 0x8c, 0x87, 0x87, 0xb7, 0xf5, 0x4e, 0xd3, 0x53, 0x1d, 0xbd, 0x0e, 0xc3, 0x2d, 0x5a, 0x22, + 0xad, 0x61, 0xec, 0xfe, 0x3b, 0x32, 0xe6, 0xa5, 0x58, 0x79, 0x88, 0x05, 0x05, 0xfb, 0x97, 0x2c, + 0x28, 0x9c, 0xc2, 0x48, 0x60, 0x73, 0x24, 0x9e, 0xcd, 0x24, 0x2d, 0x62, 0xf1, 0xcd, 0x61, 0xe7, + 0xde, 0xf2, 0xfd, 0x88, 0x78, 0x21, 0x13, 0x15, 0x53, 0x07, 0xe6, 0xbb, 0xe0, 0xcc, 0x2d, 0xdf, + 0x69, 0x2e, 0x3a, 0x2d, 0xc7, 0x6b, 0x90, 0xa0, 0xea, 0x6d, 0xf5, 0x35, 0xcb, 0xd2, 0x8d, 0xa8, + 0x72, 0xfd, 0x8c, 0xa8, 0xec, 0x6d, 0x40, 0x7a, 0x03, 0xc2, 0x70, 0x15, 
0xc3, 0x88, 0xcb, 0x9b, + 0x12, 0xc3, 0xff, 0x64, 0x3a, 0x77, 0xd7, 0xd5, 0x33, 0xcd, 0x24, 0x93, 0x17, 0x60, 0x49, 0xc8, + 0x7e, 0x19, 0x52, 0x9d, 0xd5, 0xfa, 0xab, 0x0d, 0xec, 0xcf, 0xc3, 0x34, 0xab, 0x79, 0x4c, 0x91, + 0xd6, 0x4e, 0x68, 0x25, 0x53, 0x22, 0xd3, 0xd8, 0x5f, 0xb6, 0x60, 0x72, 0x2d, 0x11, 0xb0, 0xe3, + 0x0a, 0x7b, 0x00, 0x4d, 0x51, 0x86, 0xd7, 0x59, 0x29, 0x16, 0xd0, 0x13, 0xd7, 0x41, 0xfd, 0x99, + 0x05, 0xb1, 0xff, 0xe8, 0x29, 0x30, 0x5e, 0x4b, 0x06, 0xe3, 0x95, 0xaa, 0x1b, 0x51, 0xdd, 0xc9, + 0xe2, 0xbb, 0xd0, 0x4d, 0x15, 0x2c, 0xa1, 0x87, 0x5a, 0x24, 0x26, 0xc3, 0x5d, 0xeb, 0x27, 0xcc, + 0x88, 0x0a, 0x32, 0x7c, 0x02, 0xb3, 0x9d, 0x52, 0xb8, 0x1f, 0x11, 0xdb, 0x29, 0xd5, 0x9f, 0x8c, + 0x1d, 0x5a, 0xd3, 0xba, 0xcc, 0x4e, 0xae, 0x6f, 0x67, 0xb6, 0xf0, 0x4e, 0xcb, 0x7d, 0x9f, 0xa8, + 0x88, 0x2f, 0x65, 0x61, 0xdb, 0x2e, 0x4a, 0x8f, 0x0e, 0xca, 0xe3, 0xea, 0x1f, 0x0f, 0xef, 0x16, + 0x57, 0xb1, 0x6f, 0xc0, 0x64, 0x62, 0xc0, 0xd0, 0x8b, 0x30, 0xd4, 0xde, 0x76, 0x42, 0x92, 0xb0, + 0x17, 0x1d, 0xaa, 0xd1, 0xc2, 0xa3, 0x83, 0xf2, 0x84, 0xaa, 0xc0, 0x4a, 0x30, 0xc7, 0xb6, 0xff, + 0x87, 0x05, 0x85, 0x35, 0xbf, 0x79, 0x1a, 0x8b, 0xe9, 0x35, 0x63, 0x31, 0x5d, 0xc8, 0x0a, 0x8e, + 0x99, 0xb9, 0x8e, 0x56, 0x12, 0xeb, 0xe8, 0x52, 0x26, 0x85, 0xde, 0x4b, 0x68, 0x17, 0x46, 0x59, + 0xc8, 0x4d, 0x61, 0xbf, 0xfa, 0xbc, 0x21, 0x03, 0x94, 0x13, 0x32, 0xc0, 0xa4, 0x86, 0xaa, 0x49, + 0x02, 0x4f, 0xc1, 0x88, 0xb0, 0xa1, 0x4c, 0x5a, 0xfd, 0x0b, 0x5c, 0x2c, 0xe1, 0xf6, 0x8f, 0xe5, + 0xc1, 0x08, 0xf1, 0x89, 0x7e, 0xc5, 0x82, 0xb9, 0x80, 0xbb, 0x51, 0x36, 0x2b, 0x9d, 0xc0, 0xf5, + 0xb6, 0xea, 0x8d, 0x6d, 0xd2, 0xec, 0xb4, 0x5c, 0x6f, 0xab, 0xba, 0xe5, 0xf9, 0xaa, 0x78, 0xf9, + 0x3e, 0x69, 0x74, 0xd8, 0x43, 0x48, 0x9f, 0x78, 0xa2, 0xca, 0x46, 0xe9, 0xda, 0xe1, 0x41, 0x79, + 0x0e, 0x1f, 0x8b, 0x36, 0x3e, 0x66, 0x5f, 0xd0, 0xd7, 0x2d, 0x98, 0xe7, 0x91, 0x2f, 0x07, 0xef, + 0x7f, 0x0f, 0x89, 0xa9, 0x26, 0x49, 0xc5, 0x44, 0xd6, 0x49, 0xb0, 0xbb, 0xf8, 0x92, 0x18, 0xd0, + 0xf9, 0xda, 0xf1, 0xda, 0xc2, 0xc7, 0xed, 0x9c, 0xfd, 0x2f, 0xf3, 0x30, 0x2e, 0x3c, 0xf8, 0x45, + 0x68, 0x98, 0x17, 0x8d, 0x25, 0xf1, 0x78, 0x62, 0x49, 0x4c, 0x1b, 0xc8, 0x27, 0x13, 0x15, 0x26, + 0x84, 0xe9, 0x96, 0x13, 0x46, 0x37, 0x88, 0x13, 0x44, 0x1b, 0xc4, 0xe1, 0xb6, 0x3b, 0xf9, 0x63, + 0xdb, 0x19, 0x29, 0x15, 0xcd, 0xad, 0x24, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x01, 0x62, 0x06, 0x48, + 0x81, 0xe3, 0x85, 0xfc, 0x5b, 0x5c, 0xf1, 0x66, 0x70, 0xbc, 0x56, 0x67, 0x45, 0xab, 0xe8, 0x56, + 0x17, 0x35, 0x9c, 0xd2, 0x82, 0x66, 0x58, 0x36, 0x34, 0xa8, 0x61, 0xd9, 0x70, 0x1f, 0xd7, 0x1a, + 0x0f, 0xa6, 0xba, 0x82, 0x30, 0xbc, 0x05, 0x25, 0x65, 0x00, 0x28, 0x0e, 0x9d, 0xde, 0xb1, 0x4c, + 0x92, 0x14, 0xb8, 0x1a, 0x25, 0x36, 0x3e, 0x8d, 0xc9, 0xd9, 0xff, 0x38, 0x67, 0x34, 0xc8, 0x27, + 0x71, 0x0d, 0x8a, 0x4e, 0x18, 0xba, 0x5b, 0x1e, 0x69, 0x8a, 0x1d, 0xfb, 0xf1, 0xac, 0x1d, 0x6b, + 0x34, 0xc3, 0x8c, 0x30, 0x17, 0x44, 0x4d, 0xac, 0x68, 0xa0, 0x1b, 0xdc, 0x42, 0x6a, 0x4f, 0xf2, + 0xfc, 0x83, 0x51, 0x03, 0x69, 0x43, 0xb5, 0x47, 0xb0, 0xa8, 0x8f, 0xbe, 0xc0, 0x4d, 0xd8, 0x6e, + 0x7a, 0xfe, 0x3d, 0xef, 0xba, 0xef, 0x4b, 0xb7, 0xbb, 0xc1, 0x08, 0x4e, 0x4b, 0xc3, 0x35, 0x55, + 0x1d, 0x9b, 0xd4, 0x06, 0x0b, 0x54, 0xf4, 0xdd, 0x70, 0x86, 0x92, 0x36, 0x9d, 0x67, 0x42, 0x44, + 0x60, 0x52, 0x84, 0x87, 0x90, 0x65, 0x62, 0xec, 0x52, 0xd9, 0x79, 0xb3, 0x76, 0xac, 0xf4, 0xbb, + 0x69, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x4f, 0x5a, 0xc0, 0xcc, 0xfe, 0x4f, 0x81, 0x65, 0xf8, 0xac, + 0xc9, 0x32, 0xcc, 0x64, 0x0d, 0x72, 0x06, 0xb7, 0xf0, 0x02, 0x5f, 0x59, 0xb5, 0xc0, 0xbf, 0xbf, + 
0x2f, 0xcc, 0x07, 0xfa, 0x73, 0xb2, 0xf6, 0xff, 0xb1, 0xf8, 0x21, 0xa6, 0x3c, 0xf1, 0xd1, 0xf7, + 0x40, 0xb1, 0xe1, 0xb4, 0x9d, 0x06, 0x8f, 0x47, 0x9d, 0xa9, 0xd5, 0x31, 0x2a, 0xcd, 0x2d, 0x89, + 0x1a, 0x5c, 0x4b, 0x21, 0xc3, 0x8c, 0x14, 0x65, 0x71, 0x5f, 0xcd, 0x84, 0x6a, 0x72, 0x76, 0x07, + 0xc6, 0x0d, 0x62, 0x0f, 0x55, 0xa4, 0xfd, 0x1e, 0x7e, 0xc5, 0xaa, 0xb0, 0x38, 0xbb, 0x30, 0xed, + 0x69, 0xff, 0xe9, 0x85, 0x22, 0xc5, 0x94, 0x8f, 0xf7, 0xbb, 0x44, 0xd9, 0xed, 0xa3, 0xb9, 0x35, + 0x24, 0xc8, 0xe0, 0x6e, 0xca, 0xf6, 0x8f, 0x5b, 0xf0, 0xa8, 0x8e, 0xa8, 0x05, 0x49, 0xe8, 0xa7, + 0x27, 0xae, 0x40, 0xd1, 0x6f, 0x93, 0xc0, 0x89, 0xfc, 0x40, 0xdc, 0x1a, 0x57, 0xe5, 0xa0, 0xdf, + 0x16, 0xe5, 0x47, 0x22, 0xa0, 0xa4, 0xa4, 0x2e, 0xcb, 0xb1, 0xaa, 0x49, 0xe5, 0x18, 0x36, 0x18, + 0xa1, 0x08, 0x60, 0xc1, 0xce, 0x00, 0xf6, 0x64, 0x1a, 0x62, 0x01, 0xb1, 0xff, 0xc0, 0xe2, 0x0b, + 0x4b, 0xef, 0x3a, 0x7a, 0x0f, 0xa6, 0x76, 0x9d, 0xa8, 0xb1, 0xbd, 0x7c, 0xbf, 0x1d, 0x70, 0xf5, + 0xb8, 0x1c, 0xa7, 0xa7, 0xfb, 0x8d, 0x93, 0xf6, 0x91, 0xb1, 0x55, 0xde, 0x6a, 0x82, 0x18, 0xee, + 0x22, 0x8f, 0x36, 0x60, 0x94, 0x95, 0x31, 0xf3, 0xef, 0xb0, 0x17, 0x6b, 0x90, 0xd5, 0x9a, 0x7a, + 0x75, 0x5e, 0x8d, 0xe9, 0x60, 0x9d, 0xa8, 0xfd, 0xa5, 0x3c, 0xdf, 0xed, 0x8c, 0xdb, 0x7e, 0x0a, + 0x46, 0xda, 0x7e, 0x73, 0xa9, 0x5a, 0xc1, 0x62, 0x16, 0xd4, 0x35, 0x52, 0xe3, 0xc5, 0x58, 0xc2, + 0xd1, 0x2b, 0x00, 0xe4, 0x7e, 0x44, 0x02, 0xcf, 0x69, 0x29, 0x2b, 0x19, 0x65, 0x17, 0x5a, 0xf1, + 0xd7, 0xfc, 0xe8, 0x4e, 0x48, 0xbe, 0x6b, 0x59, 0xa1, 0x60, 0x0d, 0x1d, 0x5d, 0x03, 0x68, 0x07, + 0xfe, 0x9e, 0xdb, 0x64, 0xfe, 0x84, 0x79, 0xd3, 0x86, 0xa4, 0xa6, 0x20, 0x58, 0xc3, 0x42, 0xaf, + 0xc0, 0x78, 0xc7, 0x0b, 0x39, 0x87, 0xe2, 0x6c, 0x88, 0x70, 0x8c, 0xc5, 0xd8, 0xba, 0xe1, 0x8e, + 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x80, 0xe1, 0xc8, 0x61, 0x36, 0x11, 0x43, 0xd9, 0xc6, 0x9c, 0xeb, + 0x14, 0x43, 0x8f, 0x86, 0x4c, 0x2b, 0x60, 0x51, 0x11, 0xbd, 0x25, 0x9d, 0x33, 0xf8, 0x59, 0x2f, + 0xac, 0xa8, 0x07, 0xbb, 0x17, 0x34, 0xd7, 0x0c, 0x61, 0x9d, 0x6d, 0xd0, 0xb2, 0xbf, 0x5e, 0x02, + 0x88, 0xd9, 0x71, 0xf4, 0x7e, 0xd7, 0x79, 0xf4, 0x4c, 0x6f, 0x06, 0xfe, 0xe4, 0x0e, 0x23, 0xf4, + 0xfd, 0x16, 0x8c, 0x3a, 0xad, 0x96, 0xdf, 0x70, 0x22, 0x36, 0xca, 0xb9, 0xde, 0xe7, 0xa1, 0x68, + 0x7f, 0x21, 0xae, 0xc1, 0xbb, 0xf0, 0xbc, 0x5c, 0x78, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, + 0xa7, 0xa4, 0x94, 0xc6, 0x97, 0xc7, 0x6c, 0x52, 0x4a, 0x2b, 0xb1, 0xa3, 0x5f, 0x13, 0xd0, 0xd0, + 0x1d, 0x23, 0xd2, 0x5e, 0x21, 0x3b, 0xe8, 0x84, 0xc1, 0x95, 0xf6, 0x0b, 0xb2, 0x87, 0x6a, 0xba, + 0x37, 0xd9, 0x50, 0x76, 0x64, 0x16, 0x4d, 0xfc, 0xe9, 0xe3, 0x49, 0xf6, 0x2e, 0x4c, 0x36, 0xcd, + 0xbb, 0x5d, 0xac, 0xa6, 0x27, 0xb3, 0xe8, 0x26, 0x58, 0x81, 0xf8, 0x36, 0x4f, 0x00, 0x70, 0x92, + 0x30, 0xaa, 0x71, 0xbf, 0xbe, 0xaa, 0xb7, 0xe9, 0x0b, 0x6b, 0x7c, 0x3b, 0x73, 0x2e, 0xf7, 0xc3, + 0x88, 0xec, 0x52, 0xcc, 0xf8, 0xd2, 0x5e, 0x13, 0x75, 0xb1, 0xa2, 0x82, 0x5e, 0x87, 0x61, 0xe6, + 0x18, 0x1c, 0xce, 0x14, 0xb3, 0x95, 0x89, 0x66, 0x4c, 0x8b, 0x78, 0x53, 0xb1, 0xbf, 0x21, 0x16, + 0x14, 0xd0, 0x0d, 0x19, 0xf8, 0x26, 0xac, 0x7a, 0x77, 0x42, 0xc2, 0x02, 0xdf, 0x94, 0x16, 0x3f, + 0x1e, 0xc7, 0xb4, 0xe1, 0xe5, 0xa9, 0x79, 0x0f, 0x8c, 0x9a, 0x94, 0x39, 0x12, 0xff, 0x65, 0x3a, + 0x85, 0x19, 0xc8, 0xee, 0x9e, 0x99, 0x72, 0x21, 0x1e, 0xce, 0xbb, 0x26, 0x09, 0x9c, 0xa4, 0x49, + 0x19, 0x4d, 0xbe, 0x73, 0x85, 0x3d, 0x7f, 0xbf, 0xfd, 0xcf, 0xe5, 0x6b, 0x76, 0xc9, 0xf0, 0x12, + 0x2c, 0xea, 0x9f, 0xea, 0xad, 0x3f, 0xeb, 0xc1, 0x54, 0x72, 0x8b, 0x3e, 0x54, 0x2e, 0xe3, 0xf7, + 0x0b, 0x30, 0x61, 0x2e, 
0x29, 0x34, 0x0f, 0x25, 0x41, 0x44, 0x45, 0x61, 0x55, 0xbb, 0x64, 0x55, + 0x02, 0x70, 0x8c, 0xc3, 0x82, 0xef, 0xb2, 0xea, 0x9a, 0x1d, 0x66, 0x1c, 0x7c, 0x57, 0x41, 0xb0, + 0x86, 0x45, 0xe5, 0xa5, 0x0d, 0xdf, 0x8f, 0xd4, 0xa5, 0xa2, 0xd6, 0xdd, 0x22, 0x2b, 0xc5, 0x02, + 0x4a, 0x2f, 0x93, 0x1d, 0x12, 0x78, 0xa4, 0x65, 0x06, 0x77, 0x53, 0x97, 0xc9, 0x4d, 0x1d, 0x88, + 0x4d, 0x5c, 0x7a, 0x4b, 0xfa, 0x21, 0x5b, 0xc8, 0x42, 0x2a, 0x8b, 0xed, 0x5a, 0xeb, 0xdc, 0xc5, + 0x5e, 0xc2, 0xd1, 0xe7, 0xe1, 0x51, 0xe5, 0x11, 0x8f, 0xb9, 0xa2, 0x5a, 0xb6, 0x38, 0x6c, 0x28, + 0x51, 0x1e, 0x5d, 0x4a, 0x47, 0xc3, 0x59, 0xf5, 0xd1, 0x6b, 0x30, 0x21, 0x38, 0x77, 0x49, 0x71, + 0xc4, 0xb4, 0x9d, 0xb8, 0x69, 0x40, 0x71, 0x02, 0x5b, 0x86, 0xa7, 0x63, 0xcc, 0xb3, 0xa4, 0x50, + 0xec, 0x0e, 0x4f, 0xa7, 0xc3, 0x71, 0x57, 0x0d, 0xb4, 0x00, 0x93, 0x9c, 0xb5, 0x72, 0xbd, 0x2d, + 0x3e, 0x27, 0xc2, 0xdd, 0x46, 0x6d, 0xa9, 0xdb, 0x26, 0x18, 0x27, 0xf1, 0xd1, 0xcb, 0x30, 0xe6, + 0x04, 0x8d, 0x6d, 0x37, 0x22, 0x8d, 0xa8, 0x13, 0x70, 0x3f, 0x1c, 0xcd, 0xf8, 0x64, 0x41, 0x83, + 0x61, 0x03, 0xd3, 0x7e, 0x1f, 0xce, 0xa4, 0x78, 0xea, 0xd1, 0x85, 0xe3, 0xb4, 0x5d, 0xf9, 0x4d, + 0x09, 0x0b, 0xd5, 0x85, 0x5a, 0x55, 0x7e, 0x8d, 0x86, 0x45, 0x57, 0x27, 0xf3, 0xe8, 0xd3, 0xb2, + 0xa7, 0xa8, 0xd5, 0xb9, 0x22, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0x67, 0x0e, 0x26, 0x53, 0x94, 0xef, + 0x2c, 0x83, 0x47, 0x42, 0xf6, 0x88, 0x13, 0x76, 0x98, 0xd1, 0x0e, 0x73, 0xc7, 0x88, 0x76, 0x98, + 0xef, 0x17, 0xed, 0xb0, 0xf0, 0x41, 0xa2, 0x1d, 0x9a, 0x23, 0x36, 0x34, 0xd0, 0x88, 0xa5, 0x44, + 0x48, 0x1c, 0x3e, 0x66, 0x84, 0x44, 0x63, 0xd0, 0x47, 0x06, 0x18, 0xf4, 0xaf, 0xe6, 0x60, 0x2a, + 0x69, 0x24, 0x77, 0x0a, 0xea, 0xd8, 0xd7, 0x0d, 0x75, 0x6c, 0x7a, 0x3e, 0x9c, 0xa4, 0xe9, 0x5e, + 0x96, 0x6a, 0x16, 0x27, 0x54, 0xb3, 0x9f, 0x1c, 0x88, 0x5a, 0x6f, 0x35, 0xed, 0xdf, 0xcd, 0xc1, + 0xb9, 0x64, 0x95, 0xa5, 0x96, 0xe3, 0xee, 0x9e, 0xc2, 0xd8, 0xdc, 0x36, 0xc6, 0xe6, 0xd9, 0x41, + 0xbe, 0x86, 0x75, 0x2d, 0x73, 0x80, 0xde, 0x4c, 0x0c, 0xd0, 0xfc, 0xe0, 0x24, 0x7b, 0x8f, 0xd2, + 0x37, 0xf2, 0x70, 0x29, 0xb5, 0x5e, 0xac, 0xcd, 0x5c, 0x31, 0xb4, 0x99, 0xd7, 0x12, 0xda, 0x4c, + 0xbb, 0x77, 0xed, 0x93, 0x51, 0x6f, 0x0a, 0x17, 0x4a, 0x16, 0x11, 0xef, 0x01, 0x55, 0x9b, 0x86, + 0x0b, 0xa5, 0x22, 0x84, 0x4d, 0xba, 0xdf, 0x4c, 0x2a, 0xcd, 0x7f, 0x63, 0xc1, 0xf9, 0xd4, 0xb9, + 0x39, 0x05, 0x15, 0xd6, 0x9a, 0xa9, 0xc2, 0x7a, 0x6a, 0xe0, 0xd5, 0x9a, 0xa1, 0xd3, 0xfa, 0x8d, + 0x42, 0xc6, 0xb7, 0x30, 0x01, 0xfd, 0x36, 0x8c, 0x3a, 0x8d, 0x06, 0x09, 0xc3, 0x55, 0xbf, 0xa9, + 0x22, 0xc4, 0x3d, 0xcb, 0xe4, 0xac, 0xb8, 0xf8, 0xe8, 0xa0, 0x3c, 0x9b, 0x24, 0x11, 0x83, 0xb1, + 0x4e, 0xc1, 0x0c, 0x6a, 0x99, 0x3b, 0xd1, 0xa0, 0x96, 0xd7, 0x00, 0xf6, 0x14, 0xb7, 0x9e, 0x14, + 0xf2, 0x35, 0x3e, 0x5e, 0xc3, 0x42, 0x5f, 0x80, 0x62, 0x28, 0xae, 0x71, 0xb1, 0x14, 0x9f, 0x1f, + 0x70, 0xae, 0x9c, 0x0d, 0xd2, 0x32, 0x7d, 0xf5, 0x95, 0x3e, 0x44, 0x91, 0x44, 0xdf, 0x01, 0x53, + 0x21, 0x0f, 0x05, 0xb3, 0xd4, 0x72, 0x42, 0xe6, 0x07, 0x21, 0x56, 0x21, 0x73, 0xc0, 0xaf, 0x27, + 0x60, 0xb8, 0x0b, 0x1b, 0xad, 0xc8, 0x8f, 0x62, 0x71, 0x6b, 0xf8, 0xc2, 0xbc, 0x12, 0x7f, 0x90, + 0xc8, 0x1f, 0x76, 0x36, 0x39, 0xfc, 0x6c, 0xe0, 0xb5, 0x9a, 0xe8, 0x0b, 0x00, 0x74, 0xf9, 0x08, + 0x5d, 0xc2, 0x48, 0xf6, 0xe1, 0x49, 0x4f, 0x95, 0x66, 0xaa, 0xe5, 0x27, 0x73, 0x5e, 0xac, 0x28, + 0x22, 0x58, 0x23, 0x68, 0x7f, 0xb5, 0x00, 0x8f, 0xf5, 0x38, 0x23, 0xd1, 0x82, 0xf9, 0x04, 0xfa, + 0x74, 0x52, 0xb8, 0x9e, 0x4d, 0xad, 0x6c, 0x48, 0xdb, 0x89, 0xa5, 0x98, 0xfb, 0xc0, 0x4b, 0xf1, + 0x07, 0x2d, 0x4d, 0xed, 0xc1, 0x8d, 0xf9, 0x3e, 
0x7b, 0xcc, 0xb3, 0xff, 0x04, 0xf5, 0x20, 0x9b, + 0x29, 0xca, 0x84, 0x6b, 0x03, 0x77, 0x67, 0x60, 0xed, 0xc2, 0xe9, 0x2a, 0x7f, 0xbf, 0x64, 0xc1, + 0xe3, 0xa9, 0xfd, 0x35, 0x4c, 0x36, 0xe6, 0xa1, 0xd4, 0xa0, 0x85, 0x9a, 0xaf, 0x5a, 0xec, 0xc4, + 0x2b, 0x01, 0x38, 0xc6, 0x31, 0x2c, 0x33, 0x72, 0x7d, 0x2d, 0x33, 0xfe, 0x85, 0x05, 0x5d, 0xfb, + 0xe3, 0x14, 0x0e, 0xea, 0xaa, 0x79, 0x50, 0x7f, 0x7c, 0x90, 0xb9, 0xcc, 0x38, 0xa3, 0xff, 0x68, + 0x12, 0x1e, 0xc9, 0xf0, 0xd5, 0xd8, 0x83, 0xe9, 0xad, 0x06, 0x31, 0xbd, 0x00, 0xc5, 0xc7, 0xa4, + 0x3a, 0x4c, 0xf6, 0x74, 0x19, 0x64, 0xf9, 0x88, 0xa6, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x4b, + 0x16, 0x9c, 0x75, 0xee, 0x85, 0x5d, 0xd9, 0x43, 0xc5, 0x9a, 0x79, 0x21, 0x55, 0x09, 0xd2, 0x27, + 0xdb, 0x28, 0x4f, 0xd0, 0x94, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x58, 0xc4, 0x0c, 0xa5, 0xec, 0x7c, + 0x0f, 0x3f, 0xd5, 0x34, 0xa7, 0x1a, 0x7e, 0x64, 0x4b, 0x08, 0x56, 0x74, 0xd0, 0x3b, 0x50, 0xda, + 0x92, 0x9e, 0x6e, 0x29, 0x57, 0x42, 0x3c, 0x90, 0xbd, 0xfd, 0xff, 0xf8, 0x03, 0xa5, 0x42, 0xc2, + 0x31, 0x51, 0xf4, 0x1a, 0xe4, 0xbd, 0xcd, 0xb0, 0x57, 0x8e, 0xa3, 0x84, 0x4d, 0x13, 0xf7, 0x06, + 0x5f, 0x5b, 0xa9, 0x63, 0x5a, 0x11, 0xdd, 0x80, 0x7c, 0xb0, 0xd1, 0x14, 0x1a, 0xbc, 0xd4, 0x33, + 0x1c, 0x2f, 0x56, 0x32, 0x7a, 0xc5, 0x28, 0xe1, 0xc5, 0x0a, 0xa6, 0x24, 0x50, 0x0d, 0x86, 0x98, + 0x83, 0x83, 0xb8, 0x0f, 0x52, 0x39, 0xdf, 0x1e, 0x8e, 0x42, 0xdc, 0x65, 0x9c, 0x21, 0x60, 0x4e, + 0x08, 0xad, 0xc3, 0x70, 0x83, 0xe5, 0xc3, 0x11, 0x01, 0xab, 0x3f, 0x95, 0xaa, 0xab, 0xeb, 0x91, + 0x28, 0x48, 0xa8, 0xae, 0x18, 0x06, 0x16, 0xb4, 0x18, 0x55, 0xd2, 0xde, 0xde, 0x0c, 0x99, 0xac, + 0x9f, 0x45, 0xb5, 0x47, 0xfe, 0x2b, 0x41, 0x95, 0x61, 0x60, 0x41, 0x0b, 0x7d, 0x06, 0x72, 0x9b, + 0x0d, 0xe1, 0xff, 0x90, 0xaa, 0xb4, 0x33, 0x1d, 0xfa, 0x17, 0x87, 0x0f, 0x0f, 0xca, 0xb9, 0x95, + 0x25, 0x9c, 0xdb, 0x6c, 0xa0, 0x35, 0x18, 0xd9, 0xe4, 0x2e, 0xc0, 0x42, 0x2f, 0xf7, 0x64, 0xba, + 0x77, 0x72, 0x97, 0x97, 0x30, 0xb7, 0xdb, 0x17, 0x00, 0x2c, 0x89, 0xb0, 0x10, 0x9c, 0xca, 0x95, + 0x59, 0xc4, 0xa2, 0x9e, 0x3b, 0x9e, 0xfb, 0x39, 0xbf, 0x9f, 0x63, 0x87, 0x68, 0xac, 0x51, 0xa4, + 0xab, 0xda, 0x91, 0x19, 0x2c, 0x45, 0xac, 0x8e, 0xd4, 0x55, 0xdd, 0x27, 0xb9, 0x27, 0x5f, 0xd5, + 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, 0xb7, 0x34, 0x0b, 0xdd, + 0x91, 0x71, 0x85, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, 0x29, 0xc4, 0x5e, 0xb5, + 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, 0x3f, 0x22, 0x22, 0x78, + 0x75, 0xea, 0xf0, 0xbf, 0xc1, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, 0x82, 0xee, 0x8a, 0xe1, + 0x61, 0xa7, 0xe7, 0x54, 0x76, 0x30, 0xa5, 0xd4, 0x14, 0xb2, 0xda, 0xa0, 0xb0, 0xd3, 0x32, 0x26, + 0xc5, 0x4e, 0xc9, 0xf6, 0xb6, 0x1f, 0xf9, 0x5e, 0xe2, 0x84, 0x9e, 0xce, 0x3e, 0x25, 0x6b, 0x29, + 0xf8, 0xdd, 0xa7, 0x64, 0x1a, 0x16, 0x4e, 0x6d, 0x0b, 0x35, 0x61, 0xa2, 0xed, 0x07, 0xd1, 0x3d, + 0x3f, 0x90, 0xeb, 0x0b, 0xf5, 0xd0, 0x2b, 0x18, 0x98, 0xa2, 0x45, 0x16, 0x4c, 0xdd, 0x84, 0xe0, + 0x04, 0x4d, 0xf4, 0x39, 0x18, 0x09, 0x1b, 0x4e, 0x8b, 0x54, 0x6f, 0xcf, 0x9c, 0xc9, 0xbe, 0x7e, + 0xea, 0x1c, 0x25, 0x63, 0x75, 0xb1, 0xc9, 0x11, 0x28, 0x58, 0x92, 0x43, 0x2b, 0x30, 0xc4, 0x32, + 0x22, 0xb0, 0xb8, 0xdb, 0x19, 0x31, 0xa1, 0xba, 0x2c, 0x4c, 0xf9, 0xd9, 0xc4, 0x8a, 0x31, 0xaf, + 0x4e, 0xf7, 0x80, 0x60, 0xaf, 0xfd, 0x70, 0xe6, 0x5c, 0xf6, 0x1e, 0x10, 0x5c, 0xf9, 0xed, 0x7a, + 0xaf, 0x3d, 0xa0, 0x90, 0x70, 0x4c, 0x94, 0x9e, 0xcc, 0xf4, 0x34, 0x7d, 0xa4, 0x87, 0x41, 0x4b, + 0xe6, 0x59, 0xca, 0x4e, 0x66, 0x7a, 0x92, 0x52, 0x12, 0xf6, 0xef, 0x8e, 
0x74, 0xf3, 0x2c, 0x4c, + 0x20, 0xfb, 0x4b, 0x56, 0xd7, 0x5b, 0xdd, 0xa7, 0x07, 0xd5, 0x0f, 0x9d, 0x20, 0xb7, 0xfa, 0x25, + 0x0b, 0x1e, 0x69, 0xa7, 0x7e, 0x88, 0x60, 0x00, 0x06, 0x53, 0x33, 0xf1, 0x4f, 0x57, 0xb1, 0xf1, + 0xd3, 0xe1, 0x38, 0xa3, 0xa5, 0xa4, 0x44, 0x90, 0xff, 0xc0, 0x12, 0xc1, 0x2a, 0x14, 0x19, 0x93, + 0xd9, 0x27, 0x3f, 0x5c, 0x52, 0x30, 0x62, 0xac, 0xc4, 0x92, 0xa8, 0x88, 0x15, 0x09, 0xf4, 0x43, + 0x16, 0x5c, 0x4c, 0x76, 0x1d, 0x13, 0x06, 0x16, 0x91, 0xe4, 0xb9, 0x2c, 0xb8, 0x22, 0xbe, 0xff, + 0x62, 0xad, 0x17, 0xf2, 0x51, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0x49, 0x11, 0x46, 0x87, 0x4d, + 0x05, 0xfc, 0x00, 0x02, 0xe9, 0x0b, 0x30, 0xb6, 0xeb, 0x77, 0xbc, 0x48, 0xd8, 0xbf, 0x08, 0x8f, + 0x45, 0xf6, 0xe0, 0xbc, 0xaa, 0x95, 0x63, 0x03, 0x2b, 0x21, 0xc6, 0x16, 0x1f, 0x58, 0x8c, 0x7d, + 0x3b, 0x91, 0xcd, 0xbd, 0x94, 0x1d, 0xb1, 0x50, 0x48, 0xfc, 0xc7, 0xc8, 0xe9, 0x7e, 0xba, 0xb2, + 0xd1, 0x4f, 0x5b, 0x29, 0x4c, 0x3d, 0x97, 0x96, 0x5f, 0x35, 0xa5, 0xe5, 0x2b, 0x49, 0x69, 0xb9, + 0x4b, 0xf9, 0x6a, 0x08, 0xca, 0x83, 0x87, 0xbd, 0x1e, 0x34, 0x8e, 0x9c, 0xdd, 0x82, 0xcb, 0xfd, + 0xae, 0x25, 0x66, 0x08, 0xd5, 0x54, 0x4f, 0x6d, 0xb1, 0x21, 0x54, 0xb3, 0x5a, 0xc1, 0x0c, 0x32, + 0x68, 0xa0, 0x11, 0xfb, 0xbf, 0x5b, 0x90, 0xaf, 0xf9, 0xcd, 0x53, 0x50, 0x26, 0x7f, 0xd6, 0x50, + 0x26, 0x3f, 0x96, 0x91, 0x65, 0x3f, 0x53, 0x75, 0xbc, 0x9c, 0x50, 0x1d, 0x5f, 0xcc, 0x22, 0xd0, + 0x5b, 0x51, 0xfc, 0x13, 0x79, 0x18, 0xad, 0xf9, 0x4d, 0x65, 0x85, 0xfc, 0x1b, 0x0f, 0x62, 0x85, + 0x9c, 0x19, 0x16, 0x56, 0xa3, 0xcc, 0xec, 0xa7, 0xa4, 0x13, 0xde, 0x9f, 0x33, 0x63, 0xe4, 0x37, + 0x89, 0xbb, 0xb5, 0x1d, 0x91, 0x66, 0xf2, 0x73, 0x4e, 0xcf, 0x18, 0xf9, 0xbf, 0x5a, 0x30, 0x99, + 0x68, 0x1d, 0xb5, 0x60, 0xbc, 0xa5, 0x6b, 0x02, 0xc5, 0x3a, 0x7d, 0x20, 0x25, 0xa2, 0x30, 0xe6, + 0xd4, 0x8a, 0xb0, 0x49, 0x1c, 0xcd, 0x01, 0xa8, 0x97, 0x3a, 0xa9, 0x01, 0x63, 0x5c, 0xbf, 0x7a, + 0xca, 0x0b, 0xb1, 0x86, 0x81, 0x5e, 0x84, 0xd1, 0xc8, 0x6f, 0xfb, 0x2d, 0x7f, 0x6b, 0xff, 0x26, + 0x91, 0xa1, 0x6d, 0x94, 0x89, 0xd6, 0x7a, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x53, 0x79, 0xfe, 0xa1, + 0x5e, 0xe4, 0x7e, 0x6b, 0x4d, 0x7e, 0xb4, 0xd7, 0xe4, 0x37, 0x2c, 0x98, 0xa2, 0xad, 0x33, 0x73, + 0x11, 0x79, 0xd9, 0xaa, 0xf4, 0x3b, 0x56, 0x8f, 0xf4, 0x3b, 0x57, 0xe8, 0xd9, 0xd5, 0xf4, 0x3b, + 0x91, 0xd0, 0xa0, 0x69, 0x87, 0x13, 0x2d, 0xc5, 0x02, 0x2a, 0xf0, 0x48, 0x10, 0x08, 0x1f, 0x28, + 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x99, 0x9d, 0xa7, 0x90, 0x91, 0x9d, 0x87, 0x05, 0xea, 0x13, + 0x86, 0x05, 0x82, 0xed, 0xd1, 0x02, 0xf5, 0x49, 0x8b, 0x83, 0x18, 0xc7, 0xfe, 0xb9, 0x3c, 0x8c, + 0xd5, 0xfc, 0x66, 0xfc, 0x56, 0xf6, 0x82, 0xf1, 0x56, 0x76, 0x39, 0xf1, 0x56, 0x36, 0xa5, 0xe3, + 0x7e, 0xeb, 0x65, 0xec, 0xc3, 0x7a, 0x19, 0xfb, 0xe7, 0x16, 0x9b, 0xb5, 0xca, 0x5a, 0x5d, 0x64, + 0x07, 0x7e, 0x0e, 0x46, 0xd9, 0x81, 0xc4, 0x9c, 0xee, 0xe4, 0x03, 0x12, 0x0b, 0xbc, 0xbf, 0x16, + 0x17, 0x63, 0x1d, 0x07, 0x5d, 0x85, 0x62, 0x48, 0x9c, 0xa0, 0xb1, 0xad, 0xce, 0x38, 0xf1, 0xbc, + 0xc2, 0xcb, 0xb0, 0x82, 0xa2, 0x37, 0xe2, 0x18, 0x71, 0xf9, 0xec, 0x3c, 0xb7, 0x7a, 0x7f, 0xf8, + 0x16, 0xc9, 0x0e, 0x0c, 0x67, 0xbf, 0x09, 0xa8, 0x1b, 0x7f, 0x80, 0xe0, 0x48, 0x65, 0x33, 0x38, + 0x52, 0xa9, 0x2b, 0x30, 0xd2, 0x9f, 0x5a, 0x30, 0x51, 0xf3, 0x9b, 0x74, 0xeb, 0x7e, 0x33, 0xed, + 0x53, 0x3d, 0x40, 0xe6, 0x70, 0x8f, 0x00, 0x99, 0x7f, 0xcf, 0x82, 0x91, 0x9a, 0xdf, 0x3c, 0x05, + 0xbd, 0xfb, 0xab, 0xa6, 0xde, 0xfd, 0xd1, 0x8c, 0x25, 0x91, 0xa1, 0x6a, 0xff, 0x85, 0x3c, 0x8c, + 0xd3, 0x7e, 0xfa, 0x5b, 0x72, 0x96, 0x8c, 0x11, 0xb1, 0x06, 0x18, 0x11, 0xca, 0xe6, 0xfa, 0xad, + 
0x96, 0x7f, 0x2f, 0x39, 0x63, 0x2b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x06, 0x8a, 0xed, 0x80, 0xec, + 0xb9, 0xbe, 0xe0, 0x1f, 0xb5, 0x57, 0x8c, 0x9a, 0x28, 0xc7, 0x0a, 0x83, 0xca, 0x5d, 0xa1, 0xeb, + 0x35, 0x88, 0x4c, 0xb2, 0x5d, 0x60, 0x79, 0xb8, 0x78, 0xe4, 0x6b, 0xad, 0x1c, 0x1b, 0x58, 0xe8, + 0x4d, 0x28, 0xb1, 0xff, 0xec, 0x44, 0x39, 0x7e, 0xde, 0x20, 0x91, 0x6e, 0x42, 0x10, 0xc0, 0x31, + 0x2d, 0x74, 0x0d, 0x20, 0x92, 0xd1, 0x91, 0x43, 0x11, 0xe3, 0x46, 0xf1, 0xda, 0x2a, 0x6e, 0x72, + 0x88, 0x35, 0x2c, 0xf4, 0x34, 0x94, 0x22, 0xc7, 0x6d, 0xdd, 0x72, 0x3d, 0x12, 0x32, 0x95, 0x73, + 0x5e, 0x66, 0x93, 0x10, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x87, 0x39, 0x80, 0xf3, 0xac, 0x63, 0x45, + 0x86, 0xcd, 0x78, 0x9d, 0x5b, 0xaa, 0x14, 0x6b, 0x18, 0xf6, 0xcb, 0x70, 0xae, 0xe6, 0x37, 0x6b, + 0x7e, 0x10, 0xad, 0xf8, 0xc1, 0x3d, 0x27, 0x68, 0xca, 0xf9, 0x2b, 0xcb, 0xc4, 0x06, 0xf4, 0xec, + 0x19, 0xe2, 0x3b, 0xd3, 0x48, 0x59, 0xf0, 0x3c, 0xe3, 0x76, 0x8e, 0xe9, 0xd4, 0xd1, 0x60, 0xf7, + 0xae, 0x4a, 0x30, 0x78, 0xdd, 0x89, 0x08, 0xba, 0xcd, 0x92, 0x92, 0xc5, 0x57, 0x90, 0xa8, 0xfe, + 0x94, 0x96, 0x94, 0x2c, 0x06, 0xa6, 0xde, 0x59, 0x66, 0x7d, 0xfb, 0x57, 0xf3, 0xec, 0x34, 0x4a, + 0xe4, 0xdb, 0x43, 0x5f, 0x84, 0x89, 0x90, 0xdc, 0x72, 0xbd, 0xce, 0x7d, 0x29, 0x84, 0xf7, 0x70, + 0xcb, 0xa9, 0x2f, 0xeb, 0x98, 0x5c, 0x95, 0x67, 0x96, 0xe1, 0x04, 0x35, 0x3a, 0x4f, 0x41, 0xc7, + 0x5b, 0x08, 0xef, 0x84, 0x24, 0x10, 0xf9, 0xde, 0xd8, 0x3c, 0x61, 0x59, 0x88, 0x63, 0x38, 0x5d, + 0x97, 0xec, 0xcf, 0x9a, 0xef, 0x61, 0xdf, 0x8f, 0xe4, 0x4a, 0x66, 0x19, 0x83, 0xb4, 0x72, 0x6c, + 0x60, 0xa1, 0x15, 0x40, 0x61, 0xa7, 0xdd, 0x6e, 0xb1, 0x87, 0x7d, 0xa7, 0x75, 0x3d, 0xf0, 0x3b, + 0x6d, 0xfe, 0xea, 0x99, 0xe7, 0x81, 0x09, 0xeb, 0x5d, 0x50, 0x9c, 0x52, 0x83, 0x9e, 0x3e, 0x9b, + 0x21, 0xfb, 0xcd, 0x56, 0x77, 0x5e, 0xa8, 0xd7, 0xeb, 0xac, 0x08, 0x4b, 0x18, 0x5d, 0x4c, 0xac, + 0x79, 0x8e, 0x39, 0x1c, 0x2f, 0x26, 0xac, 0x4a, 0xb1, 0x86, 0x81, 0x96, 0x61, 0x24, 0xdc, 0x0f, + 0x1b, 0x91, 0x88, 0xc8, 0x94, 0x91, 0xb9, 0xb3, 0xce, 0x50, 0xb4, 0x6c, 0x12, 0xbc, 0x0a, 0x96, + 0x75, 0xed, 0xef, 0x61, 0x97, 0x21, 0xcb, 0x0e, 0x16, 0x75, 0x02, 0x82, 0x76, 0x61, 0xbc, 0xcd, + 0xa6, 0x5c, 0xc4, 0xae, 0x16, 0xf3, 0xf6, 0xc2, 0x80, 0x52, 0xed, 0x3d, 0x7a, 0xd0, 0x28, 0xad, + 0x13, 0x13, 0x17, 0x6a, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0xbf, 0x8a, 0xd8, 0x99, 0x5b, 0xe7, 0xa2, + 0xea, 0x88, 0x30, 0x2d, 0x16, 0x7c, 0xf9, 0x6c, 0xb6, 0xce, 0x24, 0xfe, 0x22, 0x61, 0x9e, 0x8c, + 0x65, 0x5d, 0xf4, 0x06, 0x7b, 0xa5, 0xe6, 0x07, 0x5d, 0xbf, 0x24, 0xcd, 0x1c, 0xcb, 0x78, 0x90, + 0x16, 0x15, 0xb1, 0x46, 0x04, 0xdd, 0x82, 0x71, 0x91, 0x4c, 0x4a, 0x28, 0xc5, 0xf2, 0x86, 0xd2, + 0x63, 0x1c, 0xeb, 0xc0, 0xa3, 0x64, 0x01, 0x36, 0x2b, 0xa3, 0x2d, 0xb8, 0xa8, 0x65, 0x56, 0xbc, + 0x1e, 0x38, 0xec, 0xe5, 0xd2, 0x65, 0x9b, 0x48, 0x3b, 0x37, 0x1f, 0x3f, 0x3c, 0x28, 0x5f, 0x5c, + 0xef, 0x85, 0x88, 0x7b, 0xd3, 0x41, 0xb7, 0xe1, 0x1c, 0xf7, 0xe0, 0xab, 0x10, 0xa7, 0xd9, 0x72, + 0x3d, 0x75, 0x30, 0xf3, 0x75, 0x78, 0xfe, 0xf0, 0xa0, 0x7c, 0x6e, 0x21, 0x0d, 0x01, 0xa7, 0xd7, + 0x43, 0xaf, 0x42, 0xa9, 0xe9, 0x85, 0x62, 0x0c, 0x86, 0x8d, 0xa4, 0xa1, 0xa5, 0xca, 0x5a, 0x5d, + 0x7d, 0x7f, 0xfc, 0x07, 0xc7, 0x15, 0xd0, 0x16, 0x57, 0x8c, 0x29, 0x39, 0x74, 0x24, 0x3b, 0x41, + 0xbc, 0x58, 0x12, 0x86, 0x0f, 0x0f, 0xd7, 0x08, 0x2b, 0x1b, 0x58, 0xc3, 0xbd, 0xc7, 0x20, 0x8c, + 0x5e, 0x07, 0x44, 0x19, 0x35, 0xb7, 0x41, 0x16, 0x1a, 0x2c, 0x84, 0x38, 0xd3, 0x23, 0x16, 0x0d, + 0x9f, 0x09, 0x54, 0xef, 0xc2, 0xc0, 0x29, 0xb5, 0xd0, 0x0d, 0x7a, 0x90, 0xe9, 0xa5, 0xc2, 0x96, + 0x57, 0x32, 0xf7, 0x33, 
0x15, 0xd2, 0x0e, 0x48, 0xc3, 0x89, 0x48, 0xd3, 0xa4, 0x88, 0x13, 0xf5, + 0xe8, 0x5d, 0xaa, 0xb2, 0x09, 0x81, 0x19, 0x36, 0xa3, 0x3b, 0xa3, 0x10, 0x95, 0x8b, 0xb7, 0xfd, + 0x30, 0x5a, 0x23, 0xd1, 0x3d, 0x3f, 0xd8, 0x11, 0x51, 0xca, 0xe2, 0x80, 0x99, 0x31, 0x08, 0xeb, + 0x78, 0x94, 0x0f, 0x66, 0xcf, 0xc4, 0xd5, 0x0a, 0x7b, 0xa1, 0x2b, 0xc6, 0xfb, 0xe4, 0x06, 0x2f, + 0xc6, 0x12, 0x2e, 0x51, 0xab, 0xb5, 0x25, 0xf6, 0xda, 0x96, 0x40, 0xad, 0xd6, 0x96, 0xb0, 0x84, + 0x23, 0xd2, 0x9d, 0x90, 0x75, 0x22, 0x5b, 0xab, 0xd9, 0x7d, 0x1d, 0x0c, 0x98, 0x93, 0xd5, 0x83, + 0x29, 0x95, 0x0a, 0x96, 0x87, 0x6f, 0x0b, 0x67, 0x26, 0xd9, 0x22, 0x19, 0x3c, 0xf6, 0x9b, 0xd2, + 0x13, 0x57, 0x13, 0x94, 0x70, 0x17, 0x6d, 0x23, 0x90, 0xc9, 0x54, 0xdf, 0x6c, 0x50, 0xf3, 0x50, + 0x0a, 0x3b, 0x1b, 0x4d, 0x7f, 0xd7, 0x71, 0x3d, 0xf6, 0x38, 0xa6, 0x31, 0x59, 0x75, 0x09, 0xc0, + 0x31, 0x0e, 0x5a, 0x81, 0xa2, 0x23, 0x95, 0xc0, 0x28, 0x3b, 0x6a, 0x81, 0x52, 0xfd, 0x72, 0x47, + 0x5e, 0xa9, 0xf6, 0x55, 0x75, 0xd1, 0x2b, 0x30, 0x2e, 0xfc, 0xb6, 0x78, 0x2c, 0x07, 0xf6, 0x78, + 0xa5, 0x19, 0xe6, 0xd7, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x17, 0x60, 0x82, 0x52, 0x89, 0x0f, 0xb6, + 0x99, 0xb3, 0x83, 0x9c, 0x88, 0x5a, 0x96, 0x0f, 0xbd, 0x32, 0x4e, 0x10, 0x43, 0x4d, 0xb8, 0xe0, + 0x74, 0x22, 0x9f, 0x29, 0xd2, 0xcd, 0xf5, 0xbf, 0xee, 0xef, 0x10, 0x8f, 0xbd, 0x61, 0x15, 0x17, + 0x2f, 0x1f, 0x1e, 0x94, 0x2f, 0x2c, 0xf4, 0xc0, 0xc3, 0x3d, 0xa9, 0xa0, 0x3b, 0x30, 0x1a, 0xf9, + 0x2d, 0x66, 0x22, 0x4f, 0x59, 0x89, 0x47, 0xb2, 0x03, 0x01, 0xad, 0x2b, 0x34, 0x5d, 0x89, 0xa4, + 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe7, 0x7b, 0x8c, 0x85, 0x48, 0x25, 0xe1, 0xcc, 0xa3, 0xd9, 0x03, + 0xa3, 0x22, 0xa9, 0x9a, 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, 0xa6, 0xdb, 0x81, 0xeb, + 0xb3, 0x85, 0xad, 0x1e, 0x31, 0x66, 0xcc, 0xc4, 0x0e, 0xb5, 0x24, 0x02, 0xee, 0xae, 0x43, 0x85, + 0x4c, 0x59, 0x38, 0x73, 0x9e, 0x67, 0x09, 0xe3, 0x8c, 0x37, 0x2f, 0xc3, 0x0a, 0x8a, 0x56, 0xd9, + 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x68, 0x0f, 0xba, 0xd8, 0xc8, 0xf9, 0x25, 0xf5, 0x17, + 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, 0x84, 0x5a, 0x54, 0xe6, + 0xc7, 0x78, 0x24, 0x47, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, 0x3d, 0xd4, 0xd4, 0x92, 0x63, + 0x53, 0x36, 0x34, 0x9c, 0xb9, 0xd0, 0xc3, 0xe0, 0x28, 0xc1, 0xb3, 0xc6, 0x6b, 0xd1, 0x28, 0x0e, + 0x71, 0x82, 0x26, 0xfa, 0x0e, 0x98, 0x12, 0x81, 0x8f, 0xe2, 0x71, 0xbf, 0x18, 0x5b, 0x32, 0xe2, + 0x04, 0x0c, 0x77, 0x61, 0xf3, 0x58, 0xd4, 0xce, 0x46, 0x8b, 0x88, 0x45, 0x78, 0xcb, 0xf5, 0x76, + 0xc2, 0x99, 0x4b, 0xec, 0xab, 0x45, 0x2c, 0xea, 0x24, 0x14, 0xa7, 0xd4, 0x98, 0xfd, 0x76, 0x98, + 0xee, 0xba, 0xb9, 0x8e, 0x15, 0xbf, 0xfd, 0x4f, 0x86, 0xa0, 0xa4, 0x94, 0xf2, 0x68, 0xde, 0x7c, + 0x6b, 0x39, 0x9f, 0x7c, 0x6b, 0x29, 0x52, 0xd9, 0x40, 0x7f, 0x5e, 0x59, 0x37, 0x0c, 0xf5, 0x72, + 0xd9, 0xd9, 0xd2, 0x74, 0xee, 0xbe, 0xaf, 0xd3, 0x9f, 0xa6, 0x63, 0xc9, 0x0f, 0xfc, 0x68, 0x53, + 0xe8, 0xa9, 0xb6, 0x19, 0x30, 0x59, 0x31, 0x7a, 0x82, 0x0a, 0x48, 0xcd, 0x6a, 0x2d, 0x99, 0xbd, + 0xb3, 0x46, 0x0b, 0x31, 0x87, 0x31, 0x41, 0x92, 0xb2, 0x59, 0x4c, 0x90, 0x1c, 0x79, 0x40, 0x41, + 0x52, 0x12, 0xc0, 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x78, 0x55, 0x39, 0xfa, 0x3d, 0xd1, + 0x37, 0x05, 0x6a, 0x47, 0xcb, 0x72, 0xb7, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0x57, 0xa0, 0xf8, + 0x9e, 0x1f, 0xb2, 0x45, 0x29, 0x78, 0x0d, 0xe9, 0x10, 0x55, 0x7c, 0xe3, 0x76, 0x9d, 0x95, 0x1f, + 0x1d, 0x94, 0x47, 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0xba, 0x0f, 0xe7, 0x8c, 0x13, 0x5a, + 0x75, 0x17, 0x06, 0xef, 0xee, 0x45, 0xd1, 0xdc, 
0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, + 0xf6, 0x3c, 0x5f, 0x24, 0x2d, 0x96, 0xfc, 0x0c, 0x63, 0x5b, 0x4a, 0xba, 0x3b, 0x7c, 0x02, 0x01, + 0x77, 0xd7, 0xb1, 0x7f, 0x99, 0xbf, 0x61, 0x08, 0x4d, 0x27, 0x09, 0x3b, 0xad, 0xd3, 0xc8, 0x89, + 0xb5, 0x6c, 0x28, 0x61, 0x1f, 0xf8, 0x9d, 0xec, 0xd7, 0x2d, 0xf6, 0x4e, 0xb6, 0x4e, 0x76, 0xdb, + 0x2d, 0x2a, 0x6f, 0x3f, 0xfc, 0x8e, 0xbf, 0x01, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x8d, 0x97, 0xd6, + 0x29, 0xf6, 0x56, 0xa8, 0x38, 0x1d, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf2, 0x19, 0x90, 0x90, + 0x53, 0x50, 0x88, 0x55, 0x4c, 0x85, 0x58, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xc6, 0xfe, 0x89, 0xd9, + 0x6f, 0x26, 0x54, 0x7e, 0xd4, 0x1f, 0x68, 0xed, 0x1f, 0xb6, 0xe0, 0x6c, 0x9a, 0x45, 0x13, 0xe5, + 0x4e, 0xb9, 0x48, 0xab, 0x1e, 0xac, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0x21, + 0xe3, 0x78, 0x11, 0xe3, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x1a, 0xf7, 0xac, 0xe3, + 0xfd, 0x79, 0xe6, 0xd8, 0x5e, 0x75, 0xf6, 0xcf, 0xe4, 0xe0, 0x2c, 0x7f, 0x71, 0x5a, 0xd8, 0xf3, + 0xdd, 0x66, 0xcd, 0x6f, 0x8a, 0xec, 0x26, 0x6f, 0xc1, 0x58, 0x5b, 0xd3, 0x43, 0xf4, 0x8a, 0x59, + 0xa5, 0xeb, 0x2b, 0x62, 0x79, 0x50, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, + 0xd4, 0xb3, 0x45, 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x0f, 0x21, + 0xe1, 0x9d, 0xfd, 0x23, 0x16, 0x3c, 0x9a, 0x11, 0xe1, 0x8a, 0x36, 0x77, 0x8f, 0xbd, 0xed, 0x89, + 0xdc, 0x59, 0xaa, 0x39, 0xfe, 0xe2, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xd8, 0x51, 0xf1, + 0xa8, 0x5f, 0x28, 0x20, 0x23, 0x8a, 0x89, 0x16, 0x7d, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0x27, + 0xf3, 0x30, 0xc4, 0x5e, 0x88, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x98, 0xcf, 0x83, 0x84, 0x97, 0x8e, + 0xe5, 0x4c, 0x5e, 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0xcc, 0xec, 0x56, 0x85, 0xb4, 0x9c, + 0x7d, 0xa9, 0xae, 0xe0, 0xf9, 0xa6, 0x54, 0x24, 0x8d, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0x5e, + 0x83, 0x09, 0xca, 0xdf, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xd1, 0xb2, 0x15, 0x43, 0xb9, 0x6e, 0x40, + 0x71, 0x02, 0x9b, 0x0a, 0x5e, 0xed, 0x2e, 0xc5, 0xcc, 0x50, 0x2c, 0x78, 0x99, 0xca, 0x18, 0x13, + 0x97, 0x99, 0x32, 0x75, 0x98, 0xe1, 0xd6, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xe9, + 0xca, 0x63, 0x53, 0xa6, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, + 0x4c, 0x65, 0xd8, 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xf9, 0xc3, + 0xa5, 0x7f, 0xbf, 0xb2, 0x4f, 0x1b, 0x91, 0x9e, 0x4e, 0x3d, 0x02, 0xdc, 0x08, 0x0b, 0x1e, 0x95, + 0x81, 0x5c, 0xd3, 0x27, 0x0a, 0x1f, 0x27, 0x49, 0xe5, 0x41, 0xb2, 0x58, 0xff, 0x40, 0x0e, 0xce, + 0xa4, 0xd8, 0xc1, 0xf2, 0xa3, 0x6a, 0xcb, 0x0d, 0x23, 0x95, 0x53, 0x47, 0x3b, 0xaa, 0x78, 0x39, + 0x56, 0x18, 0x74, 0x3f, 0xf0, 0xc3, 0x30, 0x79, 0x00, 0x0a, 0x3b, 0x33, 0x01, 0x3d, 0x66, 0x76, + 0x9a, 0xcb, 0x50, 0xe8, 0x84, 0x44, 0x86, 0xa6, 0x52, 0xe7, 0x37, 0xd3, 0x30, 0x33, 0x08, 0x65, + 0x4d, 0xb7, 0x94, 0x72, 0x57, 0x63, 0x4d, 0xb9, 0xc6, 0x96, 0xc3, 0x68, 0xe7, 0x22, 0xe2, 0x39, + 0x5e, 0x24, 0x18, 0xd8, 0x38, 0xa0, 0x0a, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0x25, 0x0f, 0xe7, 0x33, + 0x2d, 0xe3, 0x69, 0xd7, 0x77, 0x7d, 0xcf, 0x8d, 0x7c, 0xf5, 0x4a, 0xc9, 0x83, 0xa8, 0x90, 0xf6, + 0xf6, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x91, 0x19, 0xef, 0x93, 0xd9, 0x85, 0x16, 0x2b, 0x46, + 0xd2, 0xfb, 0x41, 0x33, 0xb7, 0x3d, 0x01, 0x85, 0xb6, 0xef, 0xb7, 0x92, 0x87, 0x16, 0xed, 0xae, + 0xef, 0xb7, 0x30, 0x03, 0xa2, 0x4f, 0x88, 0xf1, 0x4a, 0x3c, 0xcb, 0x61, 0xa7, 0xe9, 0x87, 0xda, + 0xa0, 0x3d, 0x05, 0x23, 0x3b, 0x64, 0x3f, 0x70, 0xbd, 0xad, 0xe4, 0x73, 
0xed, 0x4d, 0x5e, 0x8c, + 0x25, 0xdc, 0xcc, 0x35, 0x31, 0x72, 0xd2, 0x29, 0xd7, 0x8a, 0x7d, 0xaf, 0xc0, 0x1f, 0xcc, 0xc3, + 0x24, 0x5e, 0xac, 0x7c, 0x6b, 0x22, 0xee, 0x74, 0x4f, 0xc4, 0x49, 0xa7, 0x5c, 0xeb, 0x3f, 0x1b, + 0xbf, 0x60, 0xc1, 0x24, 0x8b, 0xc7, 0x2c, 0x42, 0x77, 0xb8, 0xbe, 0x77, 0x0a, 0x2c, 0xde, 0x13, + 0x30, 0x14, 0xd0, 0x46, 0x93, 0x69, 0x85, 0x58, 0x4f, 0x30, 0x87, 0xa1, 0x0b, 0x50, 0x60, 0x5d, + 0xa0, 0x93, 0x37, 0xc6, 0x33, 0x32, 0x54, 0x9c, 0xc8, 0xc1, 0xac, 0x94, 0xf9, 0xa3, 0x63, 0xd2, + 0x6e, 0xb9, 0xbc, 0xd3, 0xf1, 0x13, 0xc8, 0x47, 0xc3, 0x1f, 0x3d, 0xb5, 0x6b, 0x1f, 0xcc, 0x1f, + 0x3d, 0x9d, 0x64, 0x6f, 0xf1, 0xe9, 0x0f, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0xfe, 0xe8, 0xbd, + 0x6b, 0x9f, 0x8c, 0xd5, 0x4d, 0xba, 0x31, 0x4c, 0xfe, 0x14, 0x8d, 0x61, 0x0a, 0x83, 0x72, 0x98, + 0x43, 0x03, 0xb8, 0x89, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x4d, 0x3c, 0xb5, 0x6f, 0x19, 0xe2, 0xdf, + 0x9f, 0xe5, 0x32, 0xbe, 0x85, 0x09, 0x82, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x98, 0xc7, + 0xf8, 0x19, 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0xb8, 0xce, 0x65, 0x67, 0xd9, 0xcc, 0x6c, + 0x6a, 0xce, 0x7c, 0xb1, 0x52, 0x43, 0x90, 0xe2, 0x7c, 0xbd, 0xaa, 0x09, 0xef, 0xf9, 0xc1, 0x85, + 0xf7, 0xb1, 0x74, 0xc1, 0x1d, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xd1, 0x63, 0x73, 0xdf, 0x64, 0x59, + 0x55, 0xfc, 0x91, 0x55, 0x13, 0x8c, 0x93, 0xf8, 0xb3, 0xaf, 0xc0, 0xf8, 0x83, 0xab, 0x2d, 0xbf, + 0x91, 0x87, 0xc7, 0x7a, 0x6c, 0x7b, 0x7e, 0xd6, 0x1b, 0x73, 0xa0, 0x9d, 0xf5, 0x5d, 0xf3, 0x50, + 0x83, 0xb3, 0x9b, 0x9d, 0x56, 0x6b, 0x9f, 0xd9, 0x9b, 0x92, 0xa6, 0xc4, 0x10, 0x3c, 0xe5, 0x05, + 0x99, 0x03, 0x63, 0x25, 0x05, 0x07, 0xa7, 0xd6, 0x44, 0xaf, 0x03, 0xf2, 0x45, 0x8a, 0xdf, 0xeb, + 0xc4, 0x13, 0xef, 0x00, 0x6c, 0xe0, 0xf3, 0xf1, 0x66, 0xbc, 0xdd, 0x85, 0x81, 0x53, 0x6a, 0x51, + 0xe1, 0x80, 0xde, 0x4a, 0xfb, 0xaa, 0x5b, 0x09, 0xe1, 0x00, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xeb, + 0x30, 0xed, 0xec, 0x39, 0x2e, 0x8f, 0xcb, 0x27, 0x09, 0x70, 0xe9, 0x40, 0x29, 0xcb, 0x16, 0x92, + 0x08, 0xb8, 0xbb, 0x4e, 0xc2, 0x25, 0x7b, 0x38, 0xdb, 0x25, 0xbb, 0xf7, 0xb9, 0xd8, 0x4f, 0xf7, + 0x6b, 0xff, 0x27, 0x8b, 0x5e, 0x5f, 0x29, 0x69, 0xfa, 0xe9, 0x38, 0x28, 0x1d, 0xa6, 0xe6, 0x1d, + 0x7d, 0x4e, 0xb3, 0x28, 0x89, 0x81, 0xd8, 0xc4, 0xe5, 0x0b, 0x22, 0x8c, 0x9d, 0x72, 0x0c, 0x16, + 0x5f, 0x44, 0x57, 0x50, 0x18, 0xe8, 0xf3, 0x30, 0xd2, 0x74, 0xf7, 0xdc, 0xd0, 0x0f, 0xc4, 0x66, + 0x39, 0xa6, 0x6b, 0x43, 0x7c, 0x0e, 0x56, 0x38, 0x19, 0x2c, 0xe9, 0xd9, 0x3f, 0x98, 0x83, 0x71, + 0xd9, 0xe2, 0x1b, 0x1d, 0x3f, 0x72, 0x4e, 0xe1, 0x5a, 0xbe, 0x6e, 0x5c, 0xcb, 0x9f, 0xe8, 0x15, + 0x62, 0x82, 0x75, 0x29, 0xf3, 0x3a, 0xbe, 0x9d, 0xb8, 0x8e, 0x9f, 0xec, 0x4f, 0xaa, 0xf7, 0x35, + 0xfc, 0xcf, 0x2c, 0x98, 0x36, 0xf0, 0x4f, 0xe1, 0x36, 0x58, 0x31, 0x6f, 0x83, 0xc7, 0xfb, 0x7e, + 0x43, 0xc6, 0x2d, 0xf0, 0x7d, 0xf9, 0x44, 0xdf, 0xd9, 0xe9, 0xff, 0x1e, 0x14, 0xb6, 0x9d, 0xa0, + 0xd9, 0x2b, 0x94, 0x6d, 0x57, 0xa5, 0xb9, 0x1b, 0x4e, 0xd0, 0xe4, 0x67, 0xf8, 0x33, 0x2a, 0x4f, + 0xa6, 0x13, 0x34, 0xfb, 0xfa, 0xa0, 0xb1, 0xa6, 0xd0, 0xcb, 0x30, 0x1c, 0x36, 0xfc, 0xb6, 0xb2, + 0x10, 0xbd, 0xcc, 0x73, 0x68, 0xd2, 0x92, 0xa3, 0x83, 0x32, 0x32, 0x9b, 0xa3, 0xc5, 0x58, 0xe0, + 0xa3, 0xb7, 0x60, 0x9c, 0xfd, 0x52, 0x96, 0x12, 0xf9, 0xec, 0x04, 0x0a, 0x75, 0x1d, 0x91, 0x1b, + 0xdc, 0x18, 0x45, 0xd8, 0x24, 0x35, 0xbb, 0x05, 0x25, 0xf5, 0x59, 0x0f, 0xd5, 0x77, 0xe8, 0xdf, + 0xe7, 0xe1, 0x4c, 0xca, 0x9a, 0x43, 0xa1, 0x31, 0x13, 0xcf, 0x0d, 0xb8, 0x54, 0x3f, 0xe0, 0x5c, + 0x84, 0x4c, 0x1a, 0x6a, 0x8a, 0xb5, 0x35, 0x70, 0xa3, 0x77, 0x42, 0x92, 0x6c, 0x94, 0x16, 0xf5, + 
0x6f, 0x94, 0x36, 0x76, 0x6a, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0xc7, 0x79, + 0x38, 0x9b, 0x16, 0xf5, 0x06, 0x7d, 0x77, 0x22, 0x99, 0xce, 0x0b, 0x83, 0xc6, 0xcb, 0xe1, 0x19, + 0x76, 0x44, 0x2e, 0xec, 0x39, 0x33, 0xbd, 0x4e, 0xdf, 0x61, 0x16, 0x6d, 0x32, 0x87, 0xd3, 0x80, + 0x27, 0x41, 0x92, 0xc7, 0xc7, 0xa7, 0x07, 0xee, 0x80, 0xc8, 0x9e, 0x14, 0x26, 0x1c, 0x4e, 0x65, + 0x71, 0x7f, 0x87, 0x53, 0xd9, 0xf2, 0xac, 0x0b, 0xa3, 0xda, 0xd7, 0x3c, 0xd4, 0x19, 0xdf, 0xa1, + 0xb7, 0x95, 0xd6, 0xef, 0x87, 0x3a, 0xeb, 0x3f, 0x62, 0x41, 0xc2, 0x1c, 0x53, 0xa9, 0xc5, 0xac, + 0x4c, 0xb5, 0xd8, 0x65, 0x28, 0x04, 0x7e, 0x8b, 0x24, 0x73, 0xd7, 0x60, 0xbf, 0x45, 0x30, 0x83, + 0x50, 0x8c, 0x28, 0x56, 0x76, 0x8c, 0xe9, 0x82, 0x9c, 0x10, 0xd1, 0x9e, 0x80, 0xa1, 0x16, 0xd9, + 0x23, 0xad, 0x64, 0x60, 0xf8, 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x0b, 0x05, 0xb8, 0xd8, 0xd3, + 0x65, 0x9b, 0x8a, 0x43, 0x5b, 0x4e, 0x44, 0xee, 0x39, 0xfb, 0xc9, 0x08, 0xce, 0xd7, 0x79, 0x31, + 0x96, 0x70, 0x66, 0xa1, 0xce, 0x23, 0x36, 0x26, 0x94, 0x88, 0x22, 0x50, 0xa3, 0x80, 0x9a, 0x4a, + 0xa9, 0xfc, 0x49, 0x28, 0xa5, 0xae, 0x01, 0x84, 0x61, 0x8b, 0xdb, 0x17, 0x34, 0x85, 0xe9, 0x7b, + 0x1c, 0xd9, 0xb3, 0x7e, 0x4b, 0x40, 0xb0, 0x86, 0x85, 0x2a, 0x30, 0xd5, 0x0e, 0xfc, 0x88, 0xeb, + 0x64, 0x2b, 0xdc, 0x30, 0x69, 0xc8, 0xf4, 0x96, 0xad, 0x25, 0xe0, 0xb8, 0xab, 0x06, 0x7a, 0x11, + 0x46, 0x85, 0x07, 0x6d, 0xcd, 0xf7, 0x5b, 0x42, 0x0d, 0xa4, 0xcc, 0x5c, 0xea, 0x31, 0x08, 0xeb, + 0x78, 0x5a, 0x35, 0xa6, 0xe8, 0x1d, 0x49, 0xad, 0xc6, 0x95, 0xbd, 0x1a, 0x5e, 0x22, 0x02, 0x56, + 0x71, 0xa0, 0x08, 0x58, 0xb1, 0x62, 0xac, 0x34, 0xf0, 0xdb, 0x16, 0xf4, 0x55, 0x25, 0xfd, 0x6c, + 0x01, 0xce, 0x88, 0x85, 0xf3, 0xb0, 0x97, 0xcb, 0x9d, 0xee, 0xe5, 0x72, 0x12, 0xaa, 0xb3, 0x6f, + 0xad, 0x99, 0xd3, 0x5e, 0x33, 0x3f, 0x64, 0x81, 0xc9, 0x5e, 0xa1, 0xff, 0x2f, 0x33, 0x04, 0xfe, + 0x8b, 0x99, 0xec, 0x5a, 0x53, 0x5e, 0x20, 0x1f, 0x30, 0x18, 0xbe, 0xfd, 0x1f, 0x2d, 0x78, 0xbc, + 0x2f, 0x45, 0xb4, 0x0c, 0x25, 0xc6, 0x03, 0x6a, 0xd2, 0xd9, 0x93, 0xca, 0x70, 0x51, 0x02, 0x32, + 0x58, 0xd2, 0xb8, 0x26, 0x5a, 0xee, 0xca, 0x35, 0xf0, 0x54, 0x4a, 0xae, 0x81, 0x73, 0xc6, 0xf0, + 0x3c, 0x60, 0xb2, 0x81, 0x5f, 0xce, 0xc3, 0x30, 0x5f, 0xf1, 0xa7, 0x20, 0x86, 0xad, 0x08, 0xbd, + 0x6d, 0x8f, 0x18, 0x58, 0xbc, 0x2f, 0x73, 0x15, 0x27, 0x72, 0x38, 0x9b, 0xa0, 0x6e, 0xab, 0x58, + 0xc3, 0x8b, 0xe6, 0x8c, 0xfb, 0x6c, 0x36, 0xa1, 0x98, 0x04, 0x4e, 0x43, 0xbb, 0xdd, 0xbe, 0x08, + 0x10, 0xb2, 0x3c, 0xfd, 0x94, 0x86, 0x88, 0xa6, 0xf6, 0xc9, 0x1e, 0xad, 0xd7, 0x15, 0x32, 0xef, + 0x43, 0xbc, 0xd3, 0x15, 0x00, 0x6b, 0x14, 0x67, 0x5f, 0x82, 0x92, 0x42, 0xee, 0xa7, 0xc5, 0x19, + 0xd3, 0x99, 0x8b, 0xcf, 0xc2, 0x64, 0xa2, 0xad, 0x63, 0x29, 0x81, 0x7e, 0xd1, 0x82, 0x49, 0xde, + 0xe5, 0x65, 0x6f, 0x4f, 0x9c, 0xa9, 0xef, 0xc3, 0xd9, 0x56, 0xca, 0xd9, 0x26, 0x66, 0x74, 0xf0, + 0xb3, 0x50, 0x29, 0x7d, 0xd2, 0xa0, 0x38, 0xb5, 0x0d, 0x74, 0x95, 0xae, 0x5b, 0x7a, 0x76, 0x39, + 0x2d, 0xe1, 0xed, 0x34, 0xc6, 0xd7, 0x2c, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0xd3, 0xbc, + 0xe7, 0x37, 0xc9, 0xbe, 0xda, 0xe1, 0x1f, 0x66, 0xdf, 0x45, 0xfa, 0x8f, 0x5c, 0x46, 0xfa, 0x0f, + 0xfd, 0xd3, 0xf2, 0x3d, 0x3f, 0xed, 0x67, 0x2c, 0x10, 0x2b, 0xf0, 0x14, 0x44, 0xf9, 0x6f, 0x37, + 0x45, 0xf9, 0xd9, 0xec, 0x45, 0x9d, 0x21, 0xc3, 0xff, 0xa9, 0x05, 0x53, 0x1c, 0x21, 0x7e, 0x73, + 0xfe, 0x50, 0xe7, 0x61, 0x90, 0x3c, 0x7e, 0x2a, 0xb9, 0x77, 0xfa, 0x47, 0x19, 0x93, 0x55, 0xe8, + 0x39, 0x59, 0x4d, 0xb9, 0x81, 0x8e, 0x91, 0xc3, 0xf2, 0xd8, 0x61, 0xb4, 0xed, 0x3f, 0xb0, 0x00, + 0xf1, 0x66, 0x0c, 0xf6, 
0x87, 0x32, 0x15, 0xac, 0x54, 0xbb, 0x2e, 0xe2, 0xa3, 0x46, 0x41, 0xb0, + 0x86, 0x75, 0x22, 0xc3, 0x93, 0x30, 0x1c, 0xc8, 0xf7, 0x37, 0x1c, 0x38, 0xc6, 0x88, 0xfe, 0xef, + 0x02, 0x24, 0xdd, 0x0f, 0xd0, 0x5d, 0x18, 0x6b, 0x38, 0x6d, 0x67, 0xc3, 0x6d, 0xb9, 0x91, 0x4b, + 0xc2, 0x5e, 0x16, 0x47, 0x4b, 0x1a, 0x9e, 0x78, 0xea, 0xd5, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, + 0xb4, 0x03, 0x77, 0xcf, 0x6d, 0x91, 0x2d, 0xa6, 0x71, 0x60, 0xfe, 0x95, 0xdc, 0x8c, 0x46, 0x96, + 0x62, 0x0d, 0x23, 0xc5, 0x55, 0x2e, 0xff, 0xf0, 0x5c, 0xe5, 0x0a, 0xc7, 0x74, 0x95, 0x1b, 0x1a, + 0xc8, 0x55, 0x0e, 0xc3, 0x23, 0x92, 0x45, 0xa2, 0xff, 0x57, 0xdc, 0x16, 0x11, 0x7c, 0x31, 0xf7, + 0xba, 0x9c, 0x3d, 0x3c, 0x28, 0x3f, 0x82, 0x53, 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, + 0x5a, 0x2d, 0xff, 0x9e, 0x1a, 0xb5, 0xe5, 0xb0, 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0xbd, + 0x70, 0x78, 0x50, 0x9e, 0x59, 0xc8, 0xc0, 0xc1, 0x99, 0xb5, 0x13, 0x9e, 0x76, 0xc5, 0xbe, 0x9e, + 0x76, 0xaf, 0x42, 0xa9, 0x1d, 0xf8, 0x8d, 0x55, 0xcd, 0xfb, 0xe7, 0x12, 0xcb, 0x90, 0x2f, 0x0b, + 0x8f, 0x0e, 0xca, 0xe3, 0xea, 0x0f, 0xbb, 0xe1, 0xe3, 0x0a, 0xf6, 0x0e, 0x9c, 0xa9, 0x93, 0xc0, + 0x65, 0xb9, 0x37, 0x9b, 0xf1, 0x86, 0x5e, 0x87, 0x52, 0x90, 0x38, 0xc2, 0x06, 0x0a, 0xe4, 0xa4, + 0xc5, 0x17, 0x96, 0x47, 0x56, 0x4c, 0xc8, 0xfe, 0x13, 0x0b, 0x46, 0x84, 0x25, 0xfa, 0x29, 0x70, + 0x4e, 0x0b, 0x86, 0x02, 0xbb, 0x9c, 0x7e, 0xcc, 0xb3, 0xce, 0x64, 0xaa, 0xae, 0xab, 0x09, 0xd5, + 0xf5, 0xe3, 0xbd, 0x88, 0xf4, 0x56, 0x5a, 0xff, 0xad, 0x3c, 0x4c, 0x98, 0xce, 0x23, 0xa7, 0x30, + 0x04, 0x6b, 0x30, 0x12, 0x0a, 0x4f, 0xa5, 0x5c, 0xb6, 0x85, 0x75, 0x72, 0x12, 0x63, 0xf3, 0x29, + 0xe1, 0x9b, 0x24, 0x89, 0xa4, 0xba, 0x40, 0xe5, 0x1f, 0xa2, 0x0b, 0x54, 0x3f, 0xff, 0x9d, 0xc2, + 0x49, 0xf8, 0xef, 0xd8, 0x5f, 0x63, 0x57, 0x8d, 0x5e, 0x7e, 0x0a, 0x5c, 0xc8, 0x75, 0xf3, 0x52, + 0xb2, 0x7b, 0xac, 0x2c, 0xd1, 0xa9, 0x0c, 0x6e, 0xe4, 0xe7, 0x2d, 0xb8, 0x98, 0xf2, 0x55, 0x1a, + 0x6b, 0xf2, 0x0c, 0x14, 0x9d, 0x4e, 0xd3, 0x55, 0x7b, 0x59, 0x7b, 0xc6, 0x5a, 0x10, 0xe5, 0x58, + 0x61, 0xa0, 0x25, 0x98, 0x26, 0xf7, 0xdb, 0x2e, 0x7f, 0x47, 0xd4, 0x6d, 0x1c, 0xf3, 0x3c, 0xb8, + 0xed, 0x72, 0x12, 0x88, 0xbb, 0xf1, 0x95, 0xfb, 0x77, 0x3e, 0xd3, 0xfd, 0xfb, 0x1f, 0x5a, 0x30, + 0xaa, 0xbc, 0x52, 0x1e, 0xfa, 0x68, 0x7f, 0x87, 0x39, 0xda, 0x8f, 0xf5, 0x18, 0xed, 0x8c, 0x61, + 0xfe, 0x3b, 0x39, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x03, 0xb0, 0x3c, 0x2f, 0x43, 0xb1, 0x1d, 0xf8, + 0x91, 0xdf, 0xf0, 0x5b, 0x82, 0xe3, 0xb9, 0x10, 0x47, 0x27, 0xe0, 0xe5, 0x47, 0xda, 0x6f, 0xac, + 0xb0, 0xd9, 0xe8, 0xf9, 0x41, 0x24, 0xb8, 0x8c, 0x78, 0xf4, 0xfc, 0x20, 0xc2, 0x0c, 0x82, 0x9a, + 0x00, 0x91, 0x13, 0x6c, 0x91, 0x88, 0x96, 0x89, 0x40, 0x27, 0xd9, 0x87, 0x47, 0x27, 0x72, 0x5b, + 0x73, 0xae, 0x17, 0x85, 0x51, 0x30, 0x57, 0xf5, 0xa2, 0xdb, 0x01, 0x17, 0xa0, 0xb4, 0x70, 0x03, + 0x8a, 0x16, 0xd6, 0xe8, 0x4a, 0x9f, 0x50, 0xd6, 0xc6, 0x90, 0xf9, 0x20, 0xbe, 0x26, 0xca, 0xb1, + 0xc2, 0xb0, 0x5f, 0x62, 0x57, 0x09, 0x1b, 0xa0, 0xe3, 0x45, 0x02, 0xf8, 0x7a, 0x51, 0x0d, 0x2d, + 0x7b, 0x0d, 0xab, 0xe8, 0xf1, 0x06, 0x7a, 0x9f, 0xdc, 0xb4, 0x61, 0xdd, 0xdf, 0x26, 0x0e, 0x4a, + 0x80, 0xbe, 0xb3, 0xcb, 0x4e, 0xe2, 0xd9, 0x3e, 0x57, 0xc0, 0x31, 0x2c, 0x23, 0x58, 0xc0, 0x6d, + 0x16, 0x8e, 0xb8, 0x5a, 0x13, 0x8b, 0x5c, 0x0b, 0xb8, 0x2d, 0x00, 0x38, 0xc6, 0x41, 0xf3, 0x42, + 0xfc, 0x2e, 0x18, 0x69, 0xf7, 0xa4, 0xf8, 0x2d, 0x3f, 0x5f, 0x93, 0xbf, 0x9f, 0x83, 0x51, 0x95, + 0x7e, 0xaf, 0xc6, 0xb3, 0x98, 0x89, 0xb0, 0x2f, 0xcb, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x3a, 0x4c, + 0x86, 0x5c, 0xf7, 0xa2, 0xa2, 0xfb, 0x71, 0x1d, 
0xd6, 0x27, 0xa5, 0x7d, 0x45, 0xdd, 0x04, 0x1f, + 0xb1, 0x22, 0x7e, 0x74, 0x48, 0xc7, 0xce, 0x24, 0x09, 0xf4, 0x1a, 0x4c, 0xb4, 0xf4, 0x44, 0xf7, + 0x35, 0xa1, 0xe2, 0x52, 0x66, 0xca, 0x46, 0x1a, 0xfc, 0x1a, 0x4e, 0x60, 0x53, 0x4e, 0x49, 0x2f, + 0x11, 0x11, 0x29, 0x1d, 0x6f, 0x8b, 0x84, 0x22, 0x79, 0x18, 0xe3, 0x94, 0x6e, 0x65, 0xe0, 0xe0, + 0xcc, 0xda, 0xe8, 0x65, 0x18, 0x93, 0x9f, 0xaf, 0xb9, 0x2d, 0xc7, 0xc6, 0xf0, 0x1a, 0x0c, 0x1b, + 0x98, 0xe8, 0x1e, 0x9c, 0x93, 0xff, 0xd7, 0x03, 0x67, 0x73, 0xd3, 0x6d, 0x08, 0xaf, 0x71, 0xee, + 0x11, 0xb4, 0x20, 0x5d, 0x8c, 0x96, 0xd3, 0x90, 0x8e, 0x0e, 0xca, 0x97, 0xc5, 0xa8, 0xa5, 0xc2, + 0xd9, 0x24, 0xa6, 0xd3, 0x47, 0xab, 0x70, 0x66, 0x9b, 0x38, 0xad, 0x68, 0x7b, 0x69, 0x9b, 0x34, + 0x76, 0xe4, 0x26, 0x62, 0xce, 0xd0, 0x9a, 0x09, 0xf9, 0x8d, 0x6e, 0x14, 0x9c, 0x56, 0x0f, 0xbd, + 0x0d, 0x33, 0xed, 0xce, 0x46, 0xcb, 0x0d, 0xb7, 0xd7, 0xfc, 0x88, 0x99, 0x74, 0xa8, 0xec, 0x75, + 0xc2, 0x6b, 0x5a, 0x39, 0x82, 0xd7, 0x32, 0xf0, 0x70, 0x26, 0x05, 0xf4, 0x3e, 0x9c, 0x4b, 0x2c, + 0x06, 0xe1, 0xc3, 0x39, 0x91, 0x1d, 0xdf, 0xb7, 0x9e, 0x56, 0x41, 0xf8, 0x64, 0xa6, 0x81, 0x70, + 0x7a, 0x13, 0x1f, 0xcc, 0xd0, 0xe7, 0x3d, 0x5a, 0x59, 0x63, 0xca, 0xd0, 0x3b, 0x30, 0xa6, 0xaf, + 0x22, 0x71, 0xc1, 0x5c, 0x49, 0xe7, 0x59, 0xb4, 0xd5, 0xc6, 0x59, 0x3a, 0xb5, 0xa2, 0x74, 0x18, + 0x36, 0x28, 0xda, 0x04, 0xd2, 0xbf, 0x0f, 0xdd, 0x82, 0x62, 0xa3, 0xe5, 0x12, 0x2f, 0xaa, 0xd6, + 0x7a, 0x05, 0x19, 0x59, 0x12, 0x38, 0x62, 0xc0, 0x44, 0x40, 0x54, 0x5e, 0x86, 0x15, 0x05, 0xfb, + 0xd7, 0x72, 0x50, 0xee, 0x13, 0x5d, 0x37, 0xa1, 0x8f, 0xb6, 0x06, 0xd2, 0x47, 0x2f, 0xc8, 0x5c, + 0x7c, 0x6b, 0x09, 0x21, 0x3d, 0x91, 0x67, 0x2f, 0x16, 0xd5, 0x93, 0xf8, 0x03, 0xdb, 0x07, 0xeb, + 0x2a, 0xed, 0x42, 0x5f, 0x0b, 0x77, 0xe3, 0x29, 0x6b, 0x68, 0x70, 0x41, 0x24, 0xf3, 0x59, 0xc2, + 0xfe, 0x5a, 0x0e, 0xce, 0xa9, 0x21, 0xfc, 0xe6, 0x1d, 0xb8, 0x3b, 0xdd, 0x03, 0x77, 0x02, 0x8f, + 0x3a, 0xf6, 0x6d, 0x18, 0xe6, 0x41, 0x5a, 0x06, 0x60, 0x80, 0x9e, 0x30, 0x23, 0x7a, 0xa9, 0x6b, + 0xda, 0x88, 0xea, 0xf5, 0x97, 0x2d, 0x98, 0x5c, 0x5f, 0xaa, 0xd5, 0xfd, 0xc6, 0x0e, 0x89, 0x16, + 0x38, 0xc3, 0x8a, 0x05, 0xff, 0x63, 0x3d, 0x20, 0x5f, 0x93, 0xc6, 0x31, 0x5d, 0x86, 0xc2, 0xb6, + 0x1f, 0x46, 0xc9, 0x17, 0xdf, 0x1b, 0x7e, 0x18, 0x61, 0x06, 0xb1, 0x7f, 0xc7, 0x82, 0x21, 0x96, + 0x41, 0xb6, 0x5f, 0x5a, 0xe3, 0x41, 0xbe, 0x0b, 0xbd, 0x08, 0xc3, 0x64, 0x73, 0x93, 0x34, 0x22, + 0x31, 0xab, 0xd2, 0x6d, 0x75, 0x78, 0x99, 0x95, 0xd2, 0x4b, 0x9f, 0x35, 0xc6, 0xff, 0x62, 0x81, + 0x8c, 0xde, 0x84, 0x52, 0xe4, 0xee, 0x92, 0x85, 0x66, 0x53, 0xbc, 0x99, 0x3d, 0x80, 0x97, 0xf0, + 0xba, 0x24, 0x80, 0x63, 0x5a, 0xf6, 0x57, 0x72, 0x00, 0x71, 0xa8, 0x81, 0x7e, 0x9f, 0xb8, 0xd8, + 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, + 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, 0x50, 0x09, 0x66, 0xdc, 0x18, 0x26, 0xa4, + 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, 0x46, 0xf0, 0x94, 0x77, 0x0d, 0x33, 0xc9, + 0x3c, 0x46, 0x86, 0xeb, 0xf8, 0xb9, 0x28, 0x97, 0xf9, 0x5c, 0xf4, 0xe3, 0x16, 0x9c, 0x4d, 0xb6, + 0xc3, 0x7c, 0xe4, 0xbe, 0x6c, 0xc1, 0x39, 0xf6, 0x68, 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x21, + 0x3d, 0x84, 0x44, 0xef, 0x1e, 0xc7, 0xfe, 0xd1, 0xab, 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x97, + 0x2d, 0x38, 0x9f, 0x99, 0xb8, 0x08, 0x5d, 0x85, 0xa2, 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, + 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, 0xa1, 0x62, 0x2e, 0x33, 0xa1, 0x62, 0xdf, + 0xfc, 0x88, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0xa2, 0x06, 0x38, 0x64, 0xde, 
0x92, 0xf9, 0x68, 0x8d, + 0xe0, 0xe9, 0x97, 0xb3, 0xfd, 0xc4, 0x44, 0xc8, 0x74, 0x75, 0xa9, 0x1b, 0x81, 0xd2, 0x0d, 0x5a, + 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, + 0x95, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, 0xfd, 0x6f, 0x73, 0x30, 0x2a, 0x83, 0x75, + 0x77, 0xbc, 0x41, 0x24, 0xcb, 0x63, 0x65, 0xef, 0x61, 0x69, 0x5c, 0x29, 0xe1, 0x5a, 0x2c, 0x90, + 0xc7, 0x69, 0x5c, 0x25, 0x00, 0xc7, 0x38, 0xe8, 0x29, 0x18, 0x09, 0x3b, 0x1b, 0x0c, 0x3d, 0xe1, + 0xc4, 0x53, 0xe7, 0xc5, 0x58, 0xc2, 0xd1, 0xe7, 0x60, 0x8a, 0xd7, 0x0b, 0xfc, 0xb6, 0xb3, 0xc5, + 0xd5, 0x9f, 0x43, 0xca, 0xfb, 0x76, 0x6a, 0x35, 0x01, 0x3b, 0x3a, 0x28, 0x9f, 0x4d, 0x96, 0x31, + 0xc5, 0x79, 0x17, 0x15, 0xf6, 0x18, 0xcf, 0x1b, 0xa1, 0xcb, 0xb4, 0xeb, 0x0d, 0x3f, 0x06, 0x61, + 0x1d, 0xcf, 0x7e, 0x07, 0x50, 0x77, 0xd8, 0x72, 0xf4, 0x3a, 0xb7, 0xc0, 0x72, 0x03, 0xd2, 0xec, + 0xa5, 0x48, 0xd7, 0x7d, 0x4c, 0xa5, 0xfd, 0x3d, 0xaf, 0x85, 0x55, 0x7d, 0xfb, 0xaf, 0xe6, 0x61, + 0x2a, 0xe9, 0x71, 0x88, 0x6e, 0xc0, 0x30, 0xbf, 0x23, 0x05, 0xf9, 0x1e, 0xef, 0xb4, 0x9a, 0x9f, + 0x22, 0x3b, 0x2d, 0xc4, 0x35, 0x2b, 0xea, 0xa3, 0xb7, 0x61, 0xb4, 0xe9, 0xdf, 0xf3, 0xee, 0x39, + 0x41, 0x73, 0xa1, 0x56, 0x15, 0xcb, 0x39, 0x95, 0xd5, 0xae, 0xc4, 0x68, 0xba, 0xef, 0x23, 0x7b, + 0x93, 0x88, 0x41, 0x58, 0x27, 0x87, 0xd6, 0x59, 0x28, 0xc6, 0x4d, 0x77, 0x6b, 0xd5, 0x69, 0xf7, + 0x32, 0xc7, 0x5d, 0x92, 0x48, 0x1a, 0xe5, 0x71, 0x11, 0xaf, 0x91, 0x03, 0x70, 0x4c, 0x08, 0x7d, + 0x37, 0x9c, 0x09, 0x33, 0xb4, 0x73, 0x59, 0x59, 0x2c, 0x7a, 0x29, 0xac, 0x16, 0x1f, 0xa5, 0x42, + 0x50, 0x9a, 0x1e, 0x2f, 0xad, 0x19, 0xfb, 0xd7, 0xcf, 0x80, 0xb1, 0x89, 0x8d, 0xa4, 0x46, 0xd6, + 0x09, 0x25, 0x35, 0xc2, 0x50, 0x24, 0xbb, 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x92, 0xee, 0x2d, + 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, 0x9d, 0xf4, 0xcc, 0x53, 0xf9, 0x0f, 0x31, 0xf3, 0x54, + 0xe1, 0x14, 0x33, 0x4f, 0xad, 0xc1, 0xc8, 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x77, 0x9a, 0xba, + 0x0e, 0xaf, 0x73, 0x94, 0xee, 0x1c, 0x27, 0x02, 0x80, 0x25, 0x11, 0xf4, 0xba, 0xda, 0x81, 0xc3, + 0xd9, 0xc2, 0x5d, 0xf7, 0x83, 0x62, 0xea, 0x1e, 0x14, 0xf9, 0xa5, 0x46, 0x1e, 0x34, 0xbf, 0xd4, + 0x8a, 0xcc, 0x0a, 0x55, 0xcc, 0xb6, 0x9d, 0x67, 0x49, 0x9f, 0xfa, 0xe4, 0x82, 0xba, 0xab, 0x67, + 0xd2, 0x2a, 0x65, 0x9f, 0x04, 0x2a, 0x49, 0xd6, 0x80, 0xf9, 0xb3, 0xbe, 0xdf, 0x82, 0x73, 0xed, + 0xb4, 0xa4, 0x72, 0x22, 0x97, 0xd3, 0x8b, 0x03, 0x67, 0xcd, 0x33, 0x1a, 0x64, 0x52, 0x7e, 0x2a, + 0x1a, 0x4e, 0x6f, 0x8e, 0x0e, 0x74, 0xb0, 0xd1, 0x14, 0x09, 0xa0, 0x9e, 0xc8, 0x48, 0xc4, 0xd5, + 0x23, 0xfd, 0xd6, 0x7a, 0x4a, 0xd2, 0xa7, 0x8f, 0x67, 0x25, 0x7d, 0x1a, 0x38, 0xd5, 0xd3, 0xeb, + 0x2a, 0x05, 0xd7, 0x78, 0xf6, 0x52, 0xe2, 0x09, 0xb6, 0xfa, 0x26, 0xde, 0x7a, 0x5d, 0x25, 0xde, + 0xea, 0x11, 0x92, 0x8e, 0xa7, 0xd5, 0xea, 0x9b, 0x6e, 0x4b, 0x4b, 0x99, 0x35, 0x79, 0x32, 0x29, + 0xb3, 0x8c, 0xab, 0x86, 0x67, 0x6d, 0x7a, 0xba, 0xcf, 0x55, 0x63, 0xd0, 0xed, 0x7d, 0xd9, 0xf0, + 0xf4, 0x60, 0xd3, 0x0f, 0x94, 0x1e, 0xec, 0xae, 0x9e, 0x6e, 0x0b, 0xf5, 0xc9, 0x27, 0x45, 0x91, + 0x06, 0x4c, 0xb2, 0x75, 0x57, 0xbf, 0x00, 0xcf, 0x64, 0xd3, 0x55, 0xf7, 0x5c, 0x37, 0xdd, 0xd4, + 0x2b, 0xb0, 0x2b, 0x79, 0xd7, 0xd9, 0xd3, 0x49, 0xde, 0x75, 0xee, 0xc4, 0x93, 0x77, 0x3d, 0x72, + 0x0a, 0xc9, 0xbb, 0x1e, 0xfd, 0x50, 0x93, 0x77, 0xcd, 0x3c, 0x84, 0xe4, 0x5d, 0x6b, 0x71, 0xf2, + 0xae, 0xf3, 0xd9, 0x53, 0x92, 0x62, 0xd0, 0x9b, 0x91, 0xb2, 0xeb, 0x2e, 0x7b, 0xd5, 0xe7, 0x21, + 0x31, 0x44, 0xcc, 0xbc, 0xf4, 0x44, 0xc5, 0x69, 0x71, 0x33, 0xf8, 0x94, 0x28, 0x10, 0x8e, 0x49, + 
0x51, 0xba, 0x71, 0x0a, 0xaf, 0xc7, 0x7a, 0xe8, 0x71, 0xd3, 0x34, 0x64, 0x3d, 0x12, 0x77, 0xbd, + 0xc6, 0x13, 0x77, 0x5d, 0xc8, 0x3e, 0xc9, 0x93, 0xd7, 0x9d, 0x99, 0xae, 0xeb, 0x07, 0x72, 0x70, + 0xa9, 0xf7, 0xbe, 0x88, 0xd5, 0x73, 0xb5, 0xf8, 0x39, 0x29, 0xa1, 0x9e, 0xe3, 0xb2, 0x55, 0x8c, + 0x35, 0x70, 0xdc, 0xa1, 0xeb, 0x30, 0xad, 0x2c, 0x81, 0x5b, 0x6e, 0x63, 0x5f, 0x4b, 0x80, 0xac, + 0x3c, 0x1e, 0xeb, 0x49, 0x04, 0xdc, 0x5d, 0x07, 0x2d, 0xc0, 0xa4, 0x51, 0x58, 0xad, 0x08, 0x19, + 0x4a, 0xe9, 0x03, 0xeb, 0x26, 0x18, 0x27, 0xf1, 0xed, 0x9f, 0xb6, 0xe0, 0xd1, 0x8c, 0xbc, 0x18, + 0x03, 0x87, 0xd5, 0xd9, 0x84, 0xc9, 0xb6, 0x59, 0xb5, 0x4f, 0xf4, 0x2d, 0x23, 0xfb, 0x86, 0xea, + 0x6b, 0x02, 0x80, 0x93, 0x44, 0x17, 0xaf, 0xfe, 0xe6, 0xef, 0x5d, 0xfa, 0xd8, 0x6f, 0xfd, 0xde, + 0xa5, 0x8f, 0xfd, 0xf6, 0xef, 0x5d, 0xfa, 0xd8, 0x5f, 0x38, 0xbc, 0x64, 0xfd, 0xe6, 0xe1, 0x25, + 0xeb, 0xb7, 0x0e, 0x2f, 0x59, 0xbf, 0x7d, 0x78, 0xc9, 0xfa, 0xdd, 0xc3, 0x4b, 0xd6, 0x57, 0x7e, + 0xff, 0xd2, 0xc7, 0xde, 0xca, 0xed, 0x3d, 0xf7, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x9b, + 0xc3, 0x9b, 0xb7, 0xea, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index b13a2db72f..b9d569f5f9 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -220,6 +220,37 @@ message CSIPersistentVolumeSource { optional SecretReference nodePublishSecretRef = 8; } +// Represents a source location of a volume to mount, managed by an external CSI driver +message CSIVolumeSource { + // Driver is the name of the CSI driver that handles this volume. + // Consult with your admin for the correct name as registered in the cluster. + optional string driver = 1; + + // Specifies a read-only configuration for the volume. + // Defaults to false (read/write). + // +optional + optional bool readOnly = 2; + + // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // If not provided, the empty value is passed to the associated CSI driver + // which will determine the default filesystem to apply. + // +optional + optional string fsType = 3; + + // VolumeAttributes stores driver-specific properties that are passed to the CSI + // driver. Consult your driver's documentation for supported values. + // +optional + map volumeAttributes = 4; + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secret references are passed. + // +optional + optional LocalObjectReference nodePublishSecretRef = 5; +} + // Adds and removes POSIX capabilities from running containers. message Capabilities { // Added capabilities @@ -1636,11 +1667,15 @@ message Lifecycle { // +optional optional Handler postStart = 1; - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. + // PreStop is called immediately before a container is terminated due to an + // API request or management event such as liveness probe failure, + // preemption, resource contention, etc. 
The handler is not called if the + // container crashes or exits. The reason for termination is passed to the + // handler. The Pod's termination grace period countdown begins before the + // PreStop hooked is executed. Regardless of the outcome of the handler, the + // container will eventually terminate within the Pod's termination grace + // period. Other management of the container blocks until the hook completes + // or until the termination grace period is reached. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional optional Handler preStop = 2; @@ -2488,7 +2523,7 @@ message PersistentVolumeSource { // +optional optional StorageOSPersistentVolumeSource storageos = 21; - // CSI represents storage that handled by an external CSI driver (Beta feature). + // CSI represents storage that is handled by an external CSI driver (Beta feature). // +optional optional CSIPersistentVolumeSource csi = 22; } @@ -3141,7 +3176,7 @@ message PodSpec { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md // +optional repeated PodReadinessGate readinessGates = 28; @@ -3149,7 +3184,7 @@ message PodSpec { // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md // This is an alpha feature and may change in the future. // +optional optional string runtimeClassName = 29; @@ -3422,6 +3457,11 @@ message QuobyteVolumeSource { // Default is no group // +optional optional string group = 5; + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + optional string tenant = 6; } // Represents a Rados Block Device mount that lasts the lifetime of a pod. @@ -4248,6 +4288,9 @@ message ServiceSpec { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge + // +listType=map + // +listMapKey=port + // +listMapKey=protocol repeated ServicePort ports = 1; // Route service traffic to pods with label keys and values matching this @@ -4284,7 +4327,7 @@ message ServiceSpec { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional optional string type = 4; @@ -4596,6 +4639,14 @@ message VolumeMount { // This field is beta in 1.10. // +optional optional string mountPropagation = 5; + + // Expanded path within the volume from which the container's volume should be mounted. 
+ // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // This field is alpha in 1.14. + // +optional + optional string subPathExpr = 6; } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -4756,6 +4807,10 @@ message VolumeSource { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional optional StorageOSVolumeSource storageos = 27; + + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + optional CSIVolumeSource csi = 28; } // Represents a vSphere volume resource. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 87f3f0c5ba..3af134400c 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -151,6 +151,9 @@ type VolumeSource struct { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -248,7 +251,7 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // CSI represents storage that handled by an external CSI driver (Beta feature). + // CSI represents storage that is handled by an external CSI driver (Beta feature). // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -467,7 +470,7 @@ type PersistentVolumeClaimSpec struct { // In the future, we plan to support more data source types and the behavior // of the provisioner may change. 
// +optional - DataSource *TypedLocalObjectReference `json:"dataSource" protobuf:"bytes,7,opt,name=dataSource"` + DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -523,7 +526,7 @@ type PersistentVolumeClaimStatus struct { type PersistentVolumeAccessMode string const ( - // can be mounted read/write mode to exactly 1 host + // can be mounted in read/write mode to exactly 1 host ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" // can be mounted in read-only mode to many hosts ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" @@ -955,6 +958,11 @@ type QuobyteVolumeSource struct { // Default is no group // +optional Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"` } // FlexPersistentVolumeSource represents a generic persistent volume resource that is @@ -1686,6 +1694,37 @@ type CSIPersistentVolumeSource struct { NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` } +// Represents a source location of a volume to mount, managed by an external CSI driver +type CSIVolumeSource struct { + // Driver is the name of the CSI driver that handles this volume. + // Consult with your admin for the correct name as registered in the cluster. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // Specifies a read-only configuration for the volume. + // Defaults to false (read/write). + // +optional + ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + + // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // If not provided, the empty value is passed to the associated CSI driver + // which will determine the default filesystem to apply. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + + // VolumeAttributes stores driver-specific properties that are passed to the CSI + // driver. Consult your driver's documentation for supported values. + // +optional + VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"` + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secret references are passed. + // +optional + NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"` +} + // ContainerPort represents a network port in a single container. type ContainerPort struct { // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each @@ -1732,6 +1771,13 @@ type VolumeMount struct { // This field is beta in 1.10. // +optional MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` + // Expanded path within the volume from which the container's volume should be mounted. 
+ // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // This field is alpha in 1.14. + // +optional + SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } // MountPropagationMode describes mount propagation. @@ -2216,11 +2262,15 @@ type Lifecycle struct { // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. + // PreStop is called immediately before a container is terminated due to an + // API request or management event such as liveness probe failure, + // preemption, resource contention, etc. The handler is not called if the + // container crashes or exits. The reason for termination is passed to the + // handler. The Pod's termination grace period countdown begins before the + // PreStop hooked is executed. Regardless of the outcome of the handler, the + // container will eventually terminate within the Pod's termination grace + // period. Other management of the container blocks until the hook completes + // or until the termination grace period is reached. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` @@ -2907,14 +2957,14 @@ type PodSpec struct { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md // +optional ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md // This is an alpha feature and may change in the future. 
// +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` @@ -3451,6 +3501,9 @@ type ServiceSpec struct { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge + // +listType=map + // +listMapKey=port + // +listMapKey=protocol Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this @@ -3487,7 +3540,7 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 13ea6d2265..2c5b04f29e 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -132,6 +132,19 @@ func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { return map_CSIPersistentVolumeSource } +var map_CSIVolumeSource = map[string]string{ + "": "Represents a source location of a volume to mount, managed by an external CSI driver", + "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", + "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", +} + +func (CSIVolumeSource) SwaggerDoc() map[string]string { + return map_CSIVolumeSource +} + var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -824,7 +837,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. 
Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -1285,7 +1298,7 @@ var map_PersistentVolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", - "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).", + "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1538,8 +1551,8 @@ var map_PodSpec = map[string]string{ "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. 
If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", + "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.", "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", } @@ -1678,6 +1691,7 @@ var map_QuobyteVolumeSource = map[string]string{ "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", "user": "User to map volume access to Defaults to serivceaccount user", "group": "Group to map volume access to Default is no group", + "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", } func (QuobyteVolumeSource) SwaggerDoc() map[string]string { @@ -2098,7 +2112,7 @@ var map_ServiceSpec = map[string]string{ "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. 
Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", @@ -2259,6 +2273,7 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. 
This field is alpha in 1.14.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2315,6 +2330,7 @@ var map_VolumeSource = map[string]string{ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "csi": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go new file mode 100644 index 0000000000..4497760d3f --- /dev/null +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + LabelHostname = "kubernetes.io/hostname" + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + + LabelOSStable = "kubernetes.io/os" + LabelArchStable = "kubernetes.io/arch" + + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) + LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" + // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) + LabelNamespaceSuffixNode = "node.kubernetes.io" + + // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled + LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" +) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 4219c95eb0..9a580c0797 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -250,6 +250,44 @@ func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { + *out = *in + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. +func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { + if in == nil { + return nil + } + out := new(CSIVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -5383,6 +5421,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(StorageOSVolumeSource) (*in).DeepCopyInto(*out) } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(CSIVolumeSource) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go index bd269c6d23..9bec7b3cc1 100644 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=events.k8s.io diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index 8ce18304be..fa799f3026 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index a0dfa96620..6802be28b7 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -24,6 +24,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: + AllowedCSIDriver AllowedFlexVolume AllowedHostPath DaemonSet @@ -109,241 +110,246 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DeploymentList) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func 
(*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} + return fileDescriptorGenerated, []int{31} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{31} + return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*NetworkPolicyPort) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{44} + return fileDescriptorGenerated, []int{45} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) 
ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{47} + return fileDescriptorGenerated, []int{48} } func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} + return fileDescriptorGenerated, []int{49} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{49} + return fileDescriptorGenerated, []int{50} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{54} + return fileDescriptorGenerated, []int{55} } func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") @@ -400,6 +406,28 @@ func init() { proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") } +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2176,6 +2204,20 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { } i += n47 } + if len(m.AllowedCSIDrivers) > 0 { + for _, msg := range m.AllowedCSIDrivers { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -2748,6 +2790,14 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AllowedCSIDriver) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l @@ -3379,6 +3429,12 @@ func (m *PodSecurityPolicySpec) Size() (n int) { l = m.RunAsGroup.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3597,6 +3653,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AllowedCSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedCSIDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" @@ -4088,6 +4154,7 @@ func (this *PodSecurityPolicySpec) String() string { `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4293,6 +4360,85 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -9978,6 +10124,37 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12069,230 +12246,232 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 3587 bytes of a gzipped FileDescriptorProto + // 3622 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x48, 0x91, 0x63, 0xc9, 0xe2, 0xc8, 0x6d, - 0x40, 0x91, 0x1d, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0xdb, 0x1c, 0x52, 0x94, 0xe8, 0xf0, + 0x40, 0x91, 0x1d, 0x69, 0xc6, 0x92, 0x25, 0x59, 0xb1, 0x10, 0xdb, 0x1c, 0x52, 0x94, 0xe8, 0xf0, 0x63, 0x5c, 0x43, 0x2a, 0x86, 0x11, 0x3b, 0x6e, 0xce, 0x14, 0x87, 0x2d, 0xf6, 0x74, 0xb7, 0xbb, - 0x6b, 0x68, 0x0e, 0x90, 0x43, 0x0e, 0x49, 0x80, 0x00, 0x09, 0x92, 0x8b, 0x93, 0x1c, 0x63, 0x04, - 0xc8, 0x69, 0x17, 0xbb, 0xb7, 0xdd, 0x83, 0x61, 0x60, 0x01, 0x2f, 0x20, 0x2c, 0xbc, 0x80, 0x6f, - 0xeb, 0x13, 0xb1, 0xa6, 0x4f, 0x8b, 0xfd, 0x07, 0x16, 0x3a, 0x2c, 0x16, 0x55, 0x5d, 0xfd, 0xdd, - 0xad, 0x69, 0xd2, 0x12, 0xb1, 0x58, 0xec, 0x8d, 0x53, 0xef, 0xbd, 0xdf, 0x7b, 0x55, 0xf5, 0xea, - 0xbd, 0xd7, 0x55, 0x8f, 0xb0, 0xbc, 0x77, 0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xaf, 0xb7, 0x4d, 0x2c, - 0x9d, 0x50, 0x62, 0xd7, 0xf6, 0x89, 0xde, 0x36, 0xac, 0x9a, 0x20, 0x28, 0xa6, 0x5a, 0x23, 0x07, - 0x94, 0xe8, 0xb6, 0x6a, 0xe8, 0x76, 0x6d, 0xff, 0xfa, 0x36, 0xa1, 0xca, 0xf5, 0x5a, 0x87, 0xe8, - 0xc4, 0x52, 0x28, 0x69, 0x57, 0x4d, 0xcb, 0xa0, 0x06, 0xba, 0xe8, 0xb0, 0x57, 0x15, 0x53, 0xad, - 0xfa, 0xec, 0x55, 0xc1, 0x7e, 0xfe, 0x5a, 0x47, 0xa5, 0xbb, 0xbd, 0xed, 0x6a, 0xcb, 0xe8, 0xd6, - 0x3a, 0x46, 0xc7, 0xa8, 0x71, 0xa9, 0xed, 0xde, 0x0e, 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x1c, 0xb4, - 0xf3, 0x72, 0x40, 0x79, 0xcb, 0xb0, 0x48, 0x6d, 0x3f, 0xa6, 0xf1, 0xfc, 0x6b, 0x3e, 0x4f, 0x57, - 0x69, 0xed, 0xaa, 0x3a, 0xb1, 0xfa, 0x35, 0x73, 0xaf, 0xc3, 0x06, 0xec, 0x5a, 0x97, 0x50, 0x25, - 0x49, 0xaa, 0x96, 0x26, 0x65, 0xf5, 0x74, 0xaa, 0x76, 0x49, 0x4c, 0xe0, 0xd6, 0x20, 0x01, 0xbb, - 0xb5, 0x4b, 0xba, 0x4a, 0x4c, 0xee, 0xd5, 0x34, 0xb9, 0x1e, 0x55, 0xb5, 0x9a, 0xaa, 0x53, 0x9b, - 0x5a, 0x51, 0x21, 0xf9, 0x0e, 
0x4c, 0x2d, 0x68, 0x9a, 0xf1, 0x09, 0x69, 0x2f, 0x6b, 0xe4, 0xe0, - 0x81, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x86, 0xe1, 0xb6, 0xa5, 0xee, 0x13, 0xab, 0x2c, 0x5d, 0x92, - 0xae, 0x8c, 0xd4, 0xc7, 0x1f, 0x1d, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x86, 0x97, 0xf8, 0x28, 0x16, - 0x54, 0xd9, 0x86, 0x09, 0x21, 0x7c, 0xdf, 0xb0, 0x69, 0x43, 0xa1, 0xbb, 0xe8, 0x06, 0x80, 0xa9, - 0xd0, 0xdd, 0x86, 0x45, 0x76, 0xd4, 0x03, 0x21, 0x8e, 0x84, 0x38, 0x34, 0x3c, 0x0a, 0x0e, 0x70, - 0xa1, 0xab, 0x50, 0xb2, 0x88, 0xd2, 0xde, 0xd0, 0xb5, 0x7e, 0x39, 0x77, 0x49, 0xba, 0x52, 0xaa, - 0x4f, 0x0a, 0x89, 0x12, 0x16, 0xe3, 0xd8, 0xe3, 0x90, 0x3f, 0xcd, 0xc1, 0xc8, 0x92, 0x42, 0xba, - 0x86, 0xde, 0x24, 0x14, 0x7d, 0x04, 0x25, 0xb6, 0xf0, 0x6d, 0x85, 0x2a, 0x5c, 0xdb, 0xe8, 0x8d, - 0x57, 0xaa, 0xbe, 0x63, 0x78, 0xeb, 0x50, 0x35, 0xf7, 0x3a, 0x6c, 0xc0, 0xae, 0x32, 0xee, 0xea, - 0xfe, 0xf5, 0xea, 0xc6, 0xf6, 0x43, 0xd2, 0xa2, 0x6b, 0x84, 0x2a, 0xbe, 0x7d, 0xfe, 0x18, 0xf6, - 0x50, 0xd1, 0x3a, 0x14, 0x6c, 0x93, 0xb4, 0xb8, 0x65, 0xa3, 0x37, 0xae, 0x56, 0x9f, 0xe8, 0x76, - 0x55, 0xcf, 0xb2, 0xa6, 0x49, 0x5a, 0xf5, 0xb3, 0x02, 0xb9, 0xc0, 0x7e, 0x61, 0x8e, 0x83, 0x1e, - 0xc0, 0xb0, 0x4d, 0x15, 0xda, 0xb3, 0xcb, 0x79, 0x8e, 0x58, 0xcd, 0x8c, 0xc8, 0xa5, 0xfc, 0xcd, - 0x70, 0x7e, 0x63, 0x81, 0x26, 0xff, 0x26, 0x07, 0xc8, 0xe3, 0x5d, 0x34, 0xf4, 0xb6, 0x4a, 0x55, - 0x43, 0x47, 0x6f, 0x40, 0x81, 0xf6, 0x4d, 0x22, 0xb6, 0xe2, 0xb2, 0x6b, 0xd0, 0x66, 0xdf, 0x24, - 0x8f, 0x0f, 0x2b, 0xb3, 0x71, 0x09, 0x46, 0xc1, 0x5c, 0x06, 0xad, 0x7a, 0xa6, 0xe6, 0xb8, 0xf4, - 0x6b, 0x61, 0xd5, 0x8f, 0x0f, 0x2b, 0x09, 0xc7, 0xa6, 0xea, 0x21, 0x85, 0x0d, 0x44, 0xfb, 0x80, - 0x34, 0xc5, 0xa6, 0x9b, 0x96, 0xa2, 0xdb, 0x8e, 0x26, 0xb5, 0x4b, 0xc4, 0x22, 0xbc, 0x9c, 0x6d, - 0xd3, 0x98, 0x44, 0xfd, 0xbc, 0xb0, 0x02, 0xad, 0xc6, 0xd0, 0x70, 0x82, 0x06, 0xe6, 0xcd, 0x16, - 0x51, 0x6c, 0x43, 0x2f, 0x17, 0xc2, 0xde, 0x8c, 0xf9, 0x28, 0x16, 0x54, 0xf4, 0x12, 0x14, 0xbb, - 0xc4, 0xb6, 0x95, 0x0e, 0x29, 0x0f, 0x71, 0xc6, 0x09, 0xc1, 0x58, 0x5c, 0x73, 0x86, 0xb1, 0x4b, - 0x97, 0x3f, 0x97, 0x60, 0xcc, 0x5b, 0xb9, 0x55, 0xd5, 0xa6, 0xe8, 0xef, 0x62, 0x7e, 0x58, 0xcd, - 0x36, 0x25, 0x26, 0xcd, 0xbd, 0xd0, 0xf3, 0x79, 0x77, 0x24, 0xe0, 0x83, 0x6b, 0x30, 0xa4, 0x52, - 0xd2, 0x65, 0xfb, 0x90, 0xbf, 0x32, 0x7a, 0xe3, 0x4a, 0x56, 0x97, 0xa9, 0x8f, 0x09, 0xd0, 0xa1, - 0x15, 0x26, 0x8e, 0x1d, 0x14, 0xf9, 0xbf, 0x0a, 0x01, 0xf3, 0x99, 0x6b, 0xa2, 0x0f, 0xa0, 0x64, - 0x13, 0x8d, 0xb4, 0xa8, 0x61, 0x09, 0xf3, 0x5f, 0xcd, 0x68, 0xbe, 0xb2, 0x4d, 0xb4, 0xa6, 0x10, - 0xad, 0x9f, 0x65, 0xf6, 0xbb, 0xbf, 0xb0, 0x07, 0x89, 0xde, 0x85, 0x12, 0x25, 0x5d, 0x53, 0x53, - 0x28, 0x11, 0xe7, 0xe8, 0xc5, 0xe0, 0x14, 0x98, 0xe7, 0x30, 0xb0, 0x86, 0xd1, 0xde, 0x14, 0x6c, - 0xfc, 0xf8, 0x78, 0x4b, 0xe2, 0x8e, 0x62, 0x0f, 0x06, 0xed, 0xc3, 0x78, 0xcf, 0x6c, 0x33, 0x4e, - 0xca, 0xe2, 0x59, 0xa7, 0x2f, 0x3c, 0xe9, 0x56, 0xd6, 0xb5, 0xd9, 0x0a, 0x49, 0xd7, 0x67, 0x85, - 0xae, 0xf1, 0xf0, 0x38, 0x8e, 0x68, 0x41, 0x0b, 0x30, 0xd1, 0x55, 0x75, 0x16, 0x97, 0xfa, 0x4d, - 0xd2, 0x32, 0xf4, 0xb6, 0xcd, 0xdd, 0x6a, 0xa8, 0x3e, 0x27, 0x00, 0x26, 0xd6, 0xc2, 0x64, 0x1c, - 0xe5, 0x47, 0xef, 0x00, 0x72, 0xa7, 0x71, 0xcf, 0x09, 0xc7, 0xaa, 0xa1, 0x73, 0x9f, 0xcb, 0xfb, - 0xce, 0xbd, 0x19, 0xe3, 0xc0, 0x09, 0x52, 0x68, 0x15, 0x66, 0x2c, 0xb2, 0xaf, 0xb2, 0x39, 0xde, - 0x57, 0x6d, 0x6a, 0x58, 0xfd, 0x55, 0xb5, 0xab, 0xd2, 0xf2, 0x30, 0xb7, 0xa9, 0x7c, 0x74, 0x58, - 0x99, 0xc1, 0x09, 0x74, 0x9c, 0x28, 0x25, 0xff, 0xf7, 0x30, 0x4c, 0x44, 0xe2, 0x0d, 0x7a, 0x00, - 0xb3, 0xad, 0x9e, 0x65, 0x11, 0x9d, 0xae, 0xf7, 0xba, 
0xdb, 0xc4, 0x6a, 0xb6, 0x76, 0x49, 0xbb, - 0xa7, 0x91, 0x36, 0x77, 0x94, 0xa1, 0xfa, 0xbc, 0xb0, 0x78, 0x76, 0x31, 0x91, 0x0b, 0xa7, 0x48, - 0xb3, 0x55, 0xd0, 0xf9, 0xd0, 0x9a, 0x6a, 0xdb, 0x1e, 0x66, 0x8e, 0x63, 0x7a, 0xab, 0xb0, 0x1e, - 0xe3, 0xc0, 0x09, 0x52, 0xcc, 0xc6, 0x36, 0xb1, 0x55, 0x8b, 0xb4, 0xa3, 0x36, 0xe6, 0xc3, 0x36, - 0x2e, 0x25, 0x72, 0xe1, 0x14, 0x69, 0x74, 0x13, 0x46, 0x1d, 0x6d, 0x7c, 0xff, 0xc4, 0x46, 0x4f, - 0x0b, 0xb0, 0xd1, 0x75, 0x9f, 0x84, 0x83, 0x7c, 0x6c, 0x6a, 0xc6, 0xb6, 0x4d, 0xac, 0x7d, 0xd2, - 0x4e, 0xdf, 0xe0, 0x8d, 0x18, 0x07, 0x4e, 0x90, 0x62, 0x53, 0x73, 0x3c, 0x30, 0x36, 0xb5, 0xe1, - 0xf0, 0xd4, 0xb6, 0x12, 0xb9, 0x70, 0x8a, 0x34, 0xf3, 0x63, 0xc7, 0xe4, 0x85, 0x7d, 0x45, 0xd5, - 0x94, 0x6d, 0x8d, 0x94, 0x8b, 0x61, 0x3f, 0x5e, 0x0f, 0x93, 0x71, 0x94, 0x1f, 0xdd, 0x83, 0x29, - 0x67, 0x68, 0x4b, 0x57, 0x3c, 0x90, 0x12, 0x07, 0x79, 0x4e, 0x80, 0x4c, 0xad, 0x47, 0x19, 0x70, - 0x5c, 0x06, 0xbd, 0x01, 0xe3, 0x2d, 0x43, 0xd3, 0xb8, 0x3f, 0x2e, 0x1a, 0x3d, 0x9d, 0x96, 0x47, - 0x38, 0x0a, 0x62, 0xe7, 0x71, 0x31, 0x44, 0xc1, 0x11, 0x4e, 0x44, 0x00, 0x5a, 0x6e, 0xc2, 0xb1, - 0xcb, 0xc0, 0xe3, 0xe3, 0xf5, 0xac, 0x31, 0xc0, 0x4b, 0x55, 0x7e, 0x0d, 0xe0, 0x0d, 0xd9, 0x38, - 0x00, 0x2c, 0xff, 0x42, 0x82, 0xb9, 0x94, 0xd0, 0x81, 0xde, 0x0a, 0xa5, 0xd8, 0xbf, 0x8c, 0xa4, - 0xd8, 0x0b, 0x29, 0x62, 0x81, 0x3c, 0xab, 0xc3, 0x98, 0xc5, 0x66, 0xa5, 0x77, 0x1c, 0x16, 0x11, - 0x23, 0x6f, 0x0e, 0x98, 0x06, 0x0e, 0xca, 0xf8, 0x31, 0x7f, 0xea, 0xe8, 0xb0, 0x32, 0x16, 0xa2, - 0xe1, 0x30, 0xbc, 0xfc, 0x3f, 0x39, 0x80, 0x25, 0x62, 0x6a, 0x46, 0xbf, 0x4b, 0xf4, 0xd3, 0xa8, - 0xa1, 0x36, 0x42, 0x35, 0xd4, 0xb5, 0x41, 0xdb, 0xe3, 0x99, 0x96, 0x5a, 0x44, 0xfd, 0x6d, 0xa4, - 0x88, 0xaa, 0x65, 0x87, 0x7c, 0x72, 0x15, 0xf5, 0xab, 0x3c, 0x4c, 0xfb, 0xcc, 0x7e, 0x19, 0x75, - 0x27, 0xb4, 0xc7, 0x7f, 0x11, 0xd9, 0xe3, 0xb9, 0x04, 0x91, 0x67, 0x56, 0x47, 0x3d, 0xfd, 0x7a, - 0x06, 0x3d, 0x84, 0x71, 0x56, 0x38, 0x39, 0xee, 0xc1, 0xcb, 0xb2, 0xe1, 0x63, 0x97, 0x65, 0x5e, - 0x02, 0x5d, 0x0d, 0x21, 0xe1, 0x08, 0x72, 0x4a, 0x19, 0x58, 0x7c, 0xd6, 0x65, 0xa0, 0xfc, 0x85, - 0x04, 0xe3, 0xfe, 0x36, 0x9d, 0x42, 0xd1, 0xb6, 0x1e, 0x2e, 0xda, 0x5e, 0xca, 0xec, 0xa2, 0x29, - 0x55, 0xdb, 0xef, 0x58, 0x81, 0xef, 0x31, 0xb1, 0x03, 0xbe, 0xad, 0xb4, 0xf6, 0xd0, 0x25, 0x28, - 0xe8, 0x4a, 0xd7, 0xf5, 0x4c, 0xef, 0xb0, 0xac, 0x2b, 0x5d, 0x82, 0x39, 0x05, 0x7d, 0x2a, 0x01, - 0x12, 0x59, 0x60, 0x41, 0xd7, 0x0d, 0xaa, 0x38, 0xb1, 0xd2, 0x31, 0x6b, 0x25, 0xb3, 0x59, 0xae, - 0xc6, 0xea, 0x56, 0x0c, 0xeb, 0xae, 0x4e, 0xad, 0xbe, 0xbf, 0x23, 0x71, 0x06, 0x9c, 0x60, 0x00, - 0x52, 0x00, 0x2c, 0x81, 0xb9, 0x69, 0x88, 0x83, 0x7c, 0x2d, 0x43, 0xcc, 0x63, 0x02, 0x8b, 0x86, - 0xbe, 0xa3, 0x76, 0xfc, 0xb0, 0x83, 0x3d, 0x20, 0x1c, 0x00, 0x3d, 0x7f, 0x17, 0xe6, 0x52, 0xac, - 0x45, 0x93, 0x90, 0xdf, 0x23, 0x7d, 0x67, 0xd9, 0x30, 0xfb, 0x13, 0xcd, 0xc0, 0xd0, 0xbe, 0xa2, - 0xf5, 0x9c, 0xf0, 0x3b, 0x82, 0x9d, 0x1f, 0x6f, 0xe4, 0x6e, 0x4b, 0xf2, 0xe7, 0x43, 0x41, 0xdf, - 0xe1, 0x15, 0xf3, 0x15, 0xf6, 0xd1, 0x6a, 0x6a, 0x6a, 0x4b, 0xb1, 0x45, 0x21, 0x74, 0xd6, 0xf9, - 0x60, 0x75, 0xc6, 0xb0, 0x47, 0x0d, 0xd5, 0xd6, 0xb9, 0x67, 0x5b, 0x5b, 0xe7, 0x9f, 0x4e, 0x6d, - 0xfd, 0xf7, 0x50, 0xb2, 0xdd, 0xaa, 0xba, 0xc0, 0x21, 0xaf, 0x1f, 0x23, 0xbe, 0x8a, 0x82, 0xda, - 0x53, 0xe0, 0x95, 0xd2, 0x1e, 0x68, 0x52, 0x11, 0x3d, 0x74, 0xcc, 0x22, 0xfa, 0xa9, 0x16, 0xbe, - 0x2c, 0xa6, 0x9a, 0x4a, 0xcf, 0x26, 0x6d, 0x1e, 0x88, 0x4a, 0x7e, 0x4c, 0x6d, 0xf0, 0x51, 0x2c, - 0xa8, 0xe8, 0x83, 0x90, 0xcb, 0x96, 0x4e, 0xe2, 0xb2, 0xe3, 0xe9, 0xee, 0x8a, 
0xb6, 0x60, 0xce, - 0xb4, 0x8c, 0x8e, 0x45, 0x6c, 0x7b, 0x89, 0x28, 0x6d, 0x4d, 0xd5, 0x89, 0xbb, 0x3e, 0x4e, 0x45, - 0x74, 0xe1, 0xe8, 0xb0, 0x32, 0xd7, 0x48, 0x66, 0xc1, 0x69, 0xb2, 0xf2, 0xa3, 0x02, 0x4c, 0x46, - 0x33, 0x60, 0x4a, 0x91, 0x2a, 0x9d, 0xa8, 0x48, 0xbd, 0x1a, 0x38, 0x0c, 0x4e, 0x05, 0x1f, 0xb8, - 0xc1, 0x89, 0x1d, 0x88, 0x05, 0x98, 0x10, 0xd1, 0xc0, 0x25, 0x8a, 0x32, 0xdd, 0xdb, 0xfd, 0xad, - 0x30, 0x19, 0x47, 0xf9, 0x59, 0xe9, 0xe9, 0x57, 0x94, 0x2e, 0x48, 0x21, 0x5c, 0x7a, 0x2e, 0x44, - 0x19, 0x70, 0x5c, 0x06, 0xad, 0xc1, 0x74, 0x4f, 0x8f, 0x43, 0x39, 0xde, 0x78, 0x41, 0x40, 0x4d, - 0x6f, 0xc5, 0x59, 0x70, 0x92, 0x1c, 0xda, 0x09, 0x55, 0xa3, 0xc3, 0x3c, 0xc2, 0xde, 0xc8, 0x7c, - 0x76, 0x32, 0x97, 0xa3, 0xe8, 0x0e, 0x8c, 0x59, 0xfc, 0xbb, 0xc3, 0x35, 0xd8, 0xa9, 0xdd, 0xcf, - 0x09, 0xb1, 0x31, 0x1c, 0x24, 0xe2, 0x30, 0x6f, 0x42, 0xb9, 0x5d, 0xca, 0x5a, 0x6e, 0xcb, 0x3f, - 0x93, 0x82, 0x49, 0xc8, 0x2b, 0x81, 0x07, 0xdd, 0x32, 0xc5, 0x24, 0x02, 0xd5, 0x91, 0x91, 0x5c, - 0xfd, 0xde, 0x3a, 0x56, 0xf5, 0xeb, 0x27, 0xcf, 0xc1, 0xe5, 0xef, 0x67, 0x12, 0xcc, 0x2e, 0x37, - 0xef, 0x59, 0x46, 0xcf, 0x74, 0xcd, 0xd9, 0x30, 0x9d, 0x75, 0x7d, 0x1d, 0x0a, 0x56, 0x4f, 0x73, - 0xe7, 0xf1, 0xa2, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0xa6, 0x23, 0x52, 0xce, 0x24, 0x98, 0x00, - 0x5a, 0x87, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0x97, 0x07, 0x58, 0xbf, 0xb2, 0x84, 0x19, - 0x7b, 0xa0, 0x78, 0xe3, 0xd2, 0x58, 0xa0, 0xc8, 0xff, 0x2e, 0xc1, 0xc4, 0xfd, 0xcd, 0xcd, 0xc6, - 0x8a, 0xce, 0x4f, 0x34, 0xbf, 0x5b, 0xbd, 0x04, 0x05, 0x53, 0xa1, 0xbb, 0xd1, 0x4c, 0xcf, 0x68, - 0x98, 0x53, 0xd0, 0x7b, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80, 0xaf, 0x3b, - 0x42, 0x7e, 0x85, 0x28, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc1, 0x4c, 0xc0, 0x1c, 0xb6, 0x1e, 0x0f, - 0x58, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0xb8, 0xcc, 0x8c, 0x4c, 0xc9, - 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x1a, 0x8c, 0xf1, 0x0b, 0x65, 0xc3, 0xa2, 0x7c, - 0x59, 0xd0, 0x45, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39, 0x82, 0x8d, - 0x73, 0xb2, 0x72, 0x20, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x30, 0x1b, 0x97, 0xef, 0x41, 0x51, 0x2c, - 0x77, 0x10, 0x28, 0xff, 0x64, 0xa0, 0x7c, 0x02, 0xd0, 0x06, 0x14, 0x57, 0x1a, 0x75, 0xcd, 0x70, - 0xaa, 0xae, 0x96, 0xda, 0xb6, 0xa2, 0x7b, 0xb1, 0xb8, 0xb2, 0x84, 0x31, 0xa7, 0x20, 0x19, 0x86, - 0xc9, 0x41, 0x8b, 0x98, 0x94, 0x7b, 0xc4, 0x48, 0x1d, 0xd8, 0x2e, 0xdf, 0xe5, 0x23, 0x58, 0x50, - 0xe4, 0xff, 0xc8, 0x41, 0x51, 0x2c, 0xc7, 0x29, 0x7c, 0x85, 0xad, 0x86, 0xbe, 0xc2, 0x5e, 0xce, - 0xe6, 0x1a, 0xa9, 0x9f, 0x60, 0x9b, 0x91, 0x4f, 0xb0, 0xab, 0x19, 0xf1, 0x9e, 0xfc, 0xfd, 0xf5, - 0x63, 0x09, 0xc6, 0xc3, 0x4e, 0x89, 0x6e, 0xc2, 0x28, 0x4b, 0x38, 0x6a, 0x8b, 0xac, 0xfb, 0x75, - 0xae, 0x77, 0x09, 0xd3, 0xf4, 0x49, 0x38, 0xc8, 0x87, 0x3a, 0x9e, 0x18, 0xf3, 0x23, 0x31, 0xe9, - 0xf4, 0x25, 0xed, 0x51, 0x55, 0xab, 0x3a, 0x8f, 0x24, 0xd5, 0x15, 0x9d, 0x6e, 0x58, 0x4d, 0x6a, - 0xa9, 0x7a, 0x27, 0xa6, 0x88, 0x3b, 0x65, 0x10, 0x59, 0xfe, 0xa9, 0x04, 0xa3, 0xc2, 0xe4, 0x53, - 0xf8, 0xaa, 0xf8, 0x9b, 0xf0, 0x57, 0xc5, 0xe5, 0x8c, 0x07, 0x3c, 0xf9, 0x93, 0xe2, 0xff, 0x7d, - 0xd3, 0xd9, 0x91, 0x66, 0x5e, 0xbd, 0x6b, 0xd8, 0x34, 0xea, 0xd5, 0xec, 0x30, 0x62, 0x4e, 0x41, - 0x3d, 0x98, 0x54, 0x23, 0x31, 0x40, 0x2c, 0x6d, 0x2d, 0x9b, 0x25, 0x9e, 0x58, 0xbd, 0x2c, 0xe0, - 0x27, 0xa3, 0x14, 0x1c, 0x53, 0x21, 0x13, 0x88, 0x71, 0xa1, 0x77, 0xa1, 0xb0, 0x4b, 0xa9, 0x99, - 0x70, 0x5f, 0x3d, 0x20, 0xf2, 0xf8, 0x26, 0x94, 0xf8, 0xec, 0x36, 0x37, 0x1b, 0x98, 0x43, 0xc9, - 0xbf, 
0xf7, 0xd7, 0xa3, 0xe9, 0xf8, 0xb8, 0x17, 0x4f, 0xa5, 0x93, 0xc4, 0xd3, 0xd1, 0xa4, 0x58, - 0x8a, 0xee, 0x43, 0x9e, 0x6a, 0x59, 0x3f, 0x0b, 0x05, 0xe2, 0xe6, 0x6a, 0xd3, 0x0f, 0x48, 0x9b, - 0xab, 0x4d, 0xcc, 0x20, 0xd0, 0x06, 0x0c, 0xb1, 0xec, 0xc3, 0x8e, 0x60, 0x3e, 0xfb, 0x91, 0x66, - 0xf3, 0xf7, 0x1d, 0x82, 0xfd, 0xb2, 0xb1, 0x83, 0x23, 0x7f, 0x0c, 0x63, 0xa1, 0x73, 0x8a, 0x3e, - 0x82, 0xb3, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x3e, 0x0e, 0x5c, 0x4e, 0xfa, - 0xc2, 0x58, 0x0d, 0xf0, 0x89, 0x53, 0x3e, 0x23, 0x94, 0x9c, 0x0d, 0xd2, 0x70, 0x08, 0x51, 0x56, - 0x00, 0xfc, 0x39, 0xa2, 0x0a, 0x0c, 0x31, 0x3f, 0x73, 0xf2, 0xc9, 0x48, 0x7d, 0x84, 0x59, 0xc8, - 0xdc, 0xcf, 0xc6, 0xce, 0x38, 0xba, 0x01, 0x60, 0x93, 0x96, 0x45, 0x28, 0x0f, 0x06, 0xb9, 0xf0, - 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x73, 0x09, 0xc6, 0xd6, 0x09, 0xfd, 0xc4, 0xb0, - 0xf6, 0x1a, 0x86, 0xa6, 0xb6, 0xfa, 0xa7, 0x10, 0x6c, 0x71, 0x28, 0xd8, 0xbe, 0x32, 0x60, 0x67, - 0x42, 0xd6, 0xa5, 0x85, 0x5c, 0xf9, 0x0b, 0x09, 0xe6, 0x42, 0x9c, 0x77, 0xfd, 0xa3, 0xbb, 0x05, - 0x43, 0xa6, 0x61, 0x51, 0x37, 0x11, 0x1f, 0x4b, 0x21, 0x0b, 0x63, 0x81, 0x54, 0xcc, 0x60, 0xb0, - 0x83, 0x86, 0x56, 0x21, 0x47, 0x0d, 0xe1, 0xaa, 0xc7, 0xc3, 0x24, 0xc4, 0xaa, 0x83, 0xc0, 0xcc, - 0x6d, 0x1a, 0x38, 0x47, 0x0d, 0xb6, 0x11, 0xe5, 0x10, 0x57, 0x30, 0xf8, 0x3c, 0xa3, 0x19, 0x60, - 0x28, 0xec, 0x58, 0x46, 0xf7, 0xc4, 0x73, 0xf0, 0x36, 0x62, 0xd9, 0x32, 0xba, 0x98, 0x63, 0xc9, - 0x5f, 0x4a, 0x30, 0x15, 0xe2, 0x3c, 0x85, 0xc0, 0xff, 0x6e, 0x38, 0xf0, 0x5f, 0x3d, 0xce, 0x44, - 0x52, 0xc2, 0xff, 0x97, 0xb9, 0xc8, 0x34, 0xd8, 0x84, 0xd1, 0x0e, 0x8c, 0x9a, 0x46, 0xbb, 0xf9, - 0x14, 0x9e, 0x03, 0x27, 0x58, 0xde, 0x6c, 0xf8, 0x58, 0x38, 0x08, 0x8c, 0x0e, 0x60, 0x4a, 0x57, - 0xba, 0xc4, 0x36, 0x95, 0x16, 0x69, 0x3e, 0x85, 0x0b, 0x92, 0x73, 0xfc, 0xbd, 0x21, 0x8a, 0x88, - 0xe3, 0x4a, 0xd0, 0x1a, 0x14, 0x55, 0x93, 0xd7, 0x71, 0xa2, 0x76, 0x19, 0x98, 0x45, 0x9d, 0xaa, - 0xcf, 0x89, 0xe7, 0xe2, 0x07, 0x76, 0x31, 0xe4, 0x1f, 0x44, 0xbd, 0x81, 0xf9, 0x1f, 0xba, 0x07, - 0x25, 0xde, 0x62, 0xd1, 0x32, 0x34, 0xf7, 0x65, 0x80, 0xed, 0x6c, 0x43, 0x8c, 0x3d, 0x3e, 0xac, - 0x5c, 0x48, 0xb8, 0xf4, 0x75, 0xc9, 0xd8, 0x13, 0x46, 0xeb, 0x50, 0x30, 0xbf, 0x4f, 0x05, 0xc3, - 0x93, 0x1c, 0x2f, 0x5b, 0x38, 0x8e, 0xfc, 0x4f, 0xf9, 0x88, 0xb9, 0x3c, 0xd5, 0x3d, 0x7c, 0x6a, - 0xbb, 0xee, 0x55, 0x4c, 0xa9, 0x3b, 0xbf, 0x0d, 0x45, 0x91, 0xe1, 0x85, 0x33, 0xbf, 0x7e, 0x1c, - 0x67, 0x0e, 0x66, 0x31, 0xef, 0x83, 0xc5, 0x1d, 0x74, 0x81, 0xd1, 0x87, 0x30, 0x4c, 0x1c, 0x15, - 0x4e, 0x6e, 0xbc, 0x75, 0x1c, 0x15, 0x7e, 0x5c, 0xf5, 0x0b, 0x55, 0x31, 0x26, 0x50, 0xd1, 0x5b, - 0x6c, 0xbd, 0x18, 0x2f, 0xfb, 0x08, 0xb4, 0xcb, 0x05, 0x9e, 0xae, 0x2e, 0x3a, 0xd3, 0xf6, 0x86, - 0x1f, 0x1f, 0x56, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0x2f, 0x25, 0x98, 0xe2, 0x2b, 0xd4, 0xea, - 0x59, 0x2a, 0xed, 0x9f, 0x5a, 0x62, 0x7a, 0x10, 0x4a, 0x4c, 0xaf, 0x0d, 0x58, 0x96, 0x98, 0x85, - 0xa9, 0xc9, 0xe9, 0x2b, 0x09, 0xce, 0xc5, 0xb8, 0x4f, 0x21, 0x2e, 0x6e, 0x85, 0xe3, 0xe2, 0x2b, - 0xc7, 0x9d, 0x50, 0x4a, 0x6c, 0xfc, 0xe7, 0xc9, 0x84, 0xe9, 0xf0, 0x93, 0x72, 0x03, 0xc0, 0xb4, - 0xd4, 0x7d, 0x55, 0x23, 0x1d, 0xf1, 0x08, 0x5e, 0x0a, 0xb4, 0x38, 0x79, 0x14, 0x1c, 0xe0, 0x42, - 0x36, 0xcc, 0xb6, 0xc9, 0x8e, 0xd2, 0xd3, 0xe8, 0x42, 0xbb, 0xbd, 0xa8, 0x98, 0xca, 0xb6, 0xaa, - 0xa9, 0x54, 0x15, 0xd7, 0x05, 0x23, 0xf5, 0x3b, 0xce, 0xe3, 0x74, 0x12, 0xc7, 0xe3, 0xc3, 0xca, - 0xc5, 0xa4, 0xd7, 0x21, 0x97, 0xa5, 0x8f, 0x53, 0xa0, 0x51, 0x1f, 0xca, 0x16, 0xf9, 0xb8, 0xa7, - 0x5a, 0xa4, 0xbd, 0x64, 0x19, 
0x66, 0x48, 0x6d, 0x9e, 0xab, 0xfd, 0xeb, 0xa3, 0xc3, 0x4a, 0x19, - 0xa7, 0xf0, 0x0c, 0x56, 0x9c, 0x0a, 0x8f, 0x1e, 0xc2, 0xb4, 0xe2, 0x74, 0x86, 0x85, 0xb4, 0x3a, - 0xa7, 0xe4, 0xf6, 0xd1, 0x61, 0x65, 0x7a, 0x21, 0x4e, 0x1e, 0xac, 0x30, 0x09, 0x14, 0xd5, 0xa0, - 0xb8, 0xcf, 0xfb, 0xd6, 0xec, 0xf2, 0x10, 0xc7, 0x67, 0x89, 0xa0, 0xe8, 0xb4, 0xb2, 0x31, 0xcc, - 0xe1, 0xe5, 0x26, 0x3f, 0x7d, 0x2e, 0x17, 0xfb, 0xa0, 0x64, 0xb5, 0xa4, 0x38, 0xf1, 0xfc, 0xc6, - 0xb8, 0xe4, 0x47, 0xad, 0xfb, 0x3e, 0x09, 0x07, 0xf9, 0xd0, 0x07, 0x30, 0xb2, 0x2b, 0x6e, 0x25, - 0xec, 0x72, 0x31, 0x53, 0x12, 0x0e, 0xdd, 0x62, 0xd4, 0xa7, 0x84, 0x8a, 0x11, 0x77, 0xd8, 0xc6, - 0x3e, 0x22, 0x7a, 0x09, 0x8a, 0xfc, 0xc7, 0xca, 0x12, 0xbf, 0x8e, 0x2b, 0xf9, 0xb1, 0xed, 0xbe, - 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xd2, 0x58, 0xe4, 0xd7, 0xc2, 0x11, 0xd6, 0x95, 0xc6, 0x22, - 0x76, 0xe9, 0xe8, 0x23, 0x28, 0xda, 0x64, 0x55, 0xd5, 0x7b, 0x07, 0x65, 0xc8, 0xf4, 0xa8, 0xdc, - 0xbc, 0xcb, 0xb9, 0x23, 0x17, 0x63, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, 0xda, 0x85, 0x11, 0xab, - 0xa7, 0x2f, 0xd8, 0x5b, 0x36, 0xb1, 0xca, 0xa3, 0x5c, 0xc7, 0xa0, 0x70, 0x8e, 0x5d, 0xfe, 0xa8, - 0x16, 0x6f, 0x85, 0x3c, 0x0e, 0xec, 0x83, 0xa3, 0x7f, 0x93, 0x00, 0xd9, 0x3d, 0xd3, 0xd4, 0x48, - 0x97, 0xe8, 0x54, 0xd1, 0xf8, 0x5d, 0x9c, 0x5d, 0x3e, 0xcb, 0x75, 0xbe, 0x3d, 0x68, 0x5e, 0x31, - 0xc1, 0xa8, 0x72, 0xef, 0xd2, 0x3b, 0xce, 0x8a, 0x13, 0xf4, 0xb2, 0xa5, 0xdd, 0xb1, 0xf9, 0xdf, - 0xe5, 0xb1, 0x4c, 0x4b, 0x9b, 0x7c, 0xe7, 0xe8, 0x2f, 0xad, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x80, - 0x59, 0xb7, 0xed, 0x11, 0x1b, 0x06, 0x5d, 0x56, 0x35, 0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x71, - 0xbe, 0xed, 0x5e, 0xef, 0x07, 0x4e, 0xe4, 0xc2, 0x29, 0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x64, 0xb0, - 0xf3, 0xe4, 0xc5, 0xac, 0xbb, 0x76, 0x4b, 0xd1, 0x9c, 0x77, 0x80, 0x09, 0xae, 0xe0, 0xc5, 0xa3, - 0xc3, 0x4a, 0x65, 0xe9, 0xc9, 0xac, 0x78, 0x10, 0x16, 0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x49, - 0xae, 0xe7, 0x79, 0x16, 0x87, 0x52, 0x15, 0xa4, 0x4a, 0x23, 0x0a, 0x93, 0x4a, 0xb8, 0x01, 0xd5, - 0x2e, 0x4f, 0x65, 0xba, 0x88, 0x8c, 0xf4, 0xad, 0xfa, 0x97, 0x11, 0x11, 0x82, 0x8d, 0x63, 0x1a, - 0xd0, 0x3f, 0x00, 0x52, 0xa2, 0x3d, 0xb3, 0x76, 0x19, 0x65, 0x4a, 0x3f, 0xb1, 0x66, 0x5b, 0xdf, - 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x41, 0xab, 0x30, 0x23, 0x46, 0xb7, 0x74, 0x5b, 0xd9, 0x21, - 0xcd, 0xbe, 0xdd, 0xa2, 0x9a, 0x5d, 0x9e, 0xe6, 0xb1, 0x8f, 0x3f, 0x7c, 0x2d, 0x24, 0xd0, 0x71, - 0xa2, 0x14, 0x7a, 0x1b, 0x26, 0x77, 0x0c, 0x6b, 0x5b, 0x6d, 0xb7, 0x89, 0xee, 0x22, 0xcd, 0x70, - 0xa4, 0x19, 0xb6, 0x1a, 0xcb, 0x11, 0x1a, 0x8e, 0x71, 0x23, 0x1b, 0xce, 0x09, 0xe4, 0x86, 0x65, - 0xb4, 0xd6, 0x8c, 0x9e, 0x4e, 0x9d, 0x92, 0xe8, 0x9c, 0x97, 0x62, 0xce, 0x2d, 0x24, 0x31, 0x3c, - 0x3e, 0xac, 0x5c, 0x4a, 0xae, 0x80, 0x7d, 0x26, 0x9c, 0x8c, 0x8d, 0x76, 0x01, 0x78, 0x5c, 0x70, - 0x8e, 0xdf, 0x2c, 0x3f, 0x7e, 0xb7, 0xb3, 0x44, 0x9d, 0xc4, 0x13, 0xe8, 0x3c, 0xc9, 0x79, 0x64, - 0x1c, 0xc0, 0xe6, 0xbd, 0x32, 0xe2, 0xe5, 0xe4, 0x74, 0xfa, 0x8d, 0x8f, 0xd7, 0x2b, 0xe3, 0x9b, - 0xf6, 0xd4, 0x7a, 0x65, 0x02, 0x90, 0x4f, 0xbe, 0xab, 0xfd, 0x6d, 0x0e, 0xa6, 0x7d, 0xe6, 0xcc, - 0xbd, 0x32, 0x09, 0x22, 0x7f, 0xee, 0x39, 0x1e, 0xdc, 0x73, 0xfc, 0x85, 0x04, 0xe3, 0xfe, 0xd2, - 0xfd, 0xf1, 0xf5, 0xaf, 0xf8, 0xb6, 0xa5, 0x54, 0xd4, 0x3f, 0xca, 0x05, 0x27, 0xf0, 0x27, 0xdf, - 0x44, 0xf1, 0xfd, 0x1b, 0x85, 0xe5, 0xaf, 0xf2, 0x30, 0x19, 0x3d, 0x8d, 0xa1, 0xb7, 0x76, 0x69, - 0xe0, 0x5b, 0x7b, 0x03, 0x66, 0x76, 0x7a, 0x9a, 0xd6, 0xe7, 0xcb, 0x10, 0x78, 0x70, 0x77, 0xde, - 0xca, 0x9e, 0x17, 0x92, 0x33, 0xcb, 0x09, 0x3c, 0x38, 
0x51, 0x32, 0xa5, 0x6f, 0x20, 0x7f, 0xa2, - 0xbe, 0x81, 0xd8, 0x33, 0x76, 0xe1, 0x18, 0xcf, 0xd8, 0x89, 0x3d, 0x00, 0x43, 0x27, 0xe8, 0x01, - 0x38, 0xc9, 0xa3, 0x7d, 0x42, 0x10, 0x1b, 0xd8, 0x43, 0xfa, 0x3c, 0x9c, 0x17, 0x62, 0x94, 0xbf, - 0xa7, 0xeb, 0xd4, 0x32, 0x34, 0x8d, 0x58, 0x4b, 0xbd, 0x6e, 0xb7, 0x2f, 0xbf, 0x09, 0xe3, 0xe1, - 0x4e, 0x11, 0x67, 0xa7, 0x9d, 0x66, 0x15, 0xf1, 0x62, 0x19, 0xd8, 0x69, 0x67, 0x1c, 0x7b, 0x1c, - 0xf2, 0xbf, 0x48, 0x30, 0x9b, 0xdc, 0x11, 0x8a, 0x34, 0x18, 0xef, 0x2a, 0x07, 0xc1, 0x2e, 0x5d, - 0xe9, 0x84, 0x77, 0x49, 0xbc, 0x45, 0x60, 0x2d, 0x84, 0x85, 0x23, 0xd8, 0xf2, 0x77, 0x12, 0xcc, - 0xa5, 0x3c, 0xce, 0x9f, 0xae, 0x25, 0xe8, 0x7d, 0x28, 0x75, 0x95, 0x83, 0x66, 0xcf, 0xea, 0x90, - 0x13, 0xdf, 0x9e, 0xf1, 0x88, 0xb1, 0x26, 0x50, 0xb0, 0x87, 0x27, 0xff, 0x9f, 0x04, 0xcf, 0xa5, - 0x56, 0x14, 0xe8, 0x56, 0xa8, 0x8f, 0x40, 0x8e, 0xf4, 0x11, 0xa0, 0xb8, 0xe0, 0x33, 0x6a, 0x23, - 0xf8, 0x4c, 0x82, 0x72, 0xda, 0xd7, 0x16, 0xba, 0x19, 0x32, 0xf2, 0x85, 0x88, 0x91, 0x53, 0x31, - 0xb9, 0x67, 0x64, 0xe3, 0x0f, 0x25, 0x98, 0x4d, 0xfe, 0xea, 0x44, 0xaf, 0x86, 0x2c, 0xac, 0x44, - 0x2c, 0x9c, 0x88, 0x48, 0x09, 0xfb, 0x3e, 0x84, 0x71, 0xf1, 0x6d, 0x2a, 0x60, 0xc4, 0xde, 0xcb, - 0x49, 0x11, 0x5d, 0x40, 0xb8, 0x95, 0x20, 0xf7, 0xaa, 0xf0, 0x18, 0x8e, 0xa0, 0xc9, 0xff, 0x9a, - 0x83, 0xa1, 0x66, 0x4b, 0xd1, 0xc8, 0x29, 0x14, 0x83, 0xef, 0x84, 0x8a, 0xc1, 0x41, 0xff, 0xf7, - 0xc3, 0xad, 0x4a, 0xad, 0x03, 0x71, 0xa4, 0x0e, 0x7c, 0x39, 0x13, 0xda, 0x93, 0x4b, 0xc0, 0xbf, - 0x82, 0x11, 0x4f, 0xe9, 0xf1, 0x32, 0x93, 0xfc, 0xbf, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x66, 0x5e, - 0xdb, 0x09, 0xd5, 0x03, 0xf9, 0x0c, 0xe5, 0x7f, 0x40, 0x57, 0xd5, 0xad, 0x00, 0x9c, 0xbe, 0x55, - 0xbf, 0x53, 0x31, 0x5e, 0x18, 0xbc, 0x09, 0xe3, 0x54, 0xb1, 0x3a, 0x84, 0x7a, 0x37, 0xe3, 0x79, - 0xee, 0x8b, 0x5e, 0xb7, 0xf3, 0x66, 0x88, 0x8a, 0x23, 0xdc, 0xe7, 0xef, 0xc0, 0x58, 0x48, 0xd9, - 0xb1, 0xda, 0x4e, 0x7f, 0x22, 0xc1, 0x0b, 0x03, 0xef, 0x2d, 0x50, 0x3d, 0x74, 0x48, 0xaa, 0x91, - 0x43, 0x32, 0x9f, 0x0e, 0xf0, 0xec, 0xda, 0x97, 0xea, 0xd7, 0x1e, 0x7d, 0x3b, 0x7f, 0xe6, 0xeb, - 0x6f, 0xe7, 0xcf, 0x7c, 0xf3, 0xed, 0xfc, 0x99, 0x7f, 0x3c, 0x9a, 0x97, 0x1e, 0x1d, 0xcd, 0x4b, - 0x5f, 0x1f, 0xcd, 0x4b, 0xdf, 0x1c, 0xcd, 0x4b, 0xbf, 0x3e, 0x9a, 0x97, 0xfe, 0xf3, 0xbb, 0xf9, - 0x33, 0xef, 0x17, 0x05, 0xdc, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x51, 0x7e, 0x9f, 0x62, 0x14, - 0x3c, 0x00, 0x00, + 0x6b, 0x68, 0x0e, 0x90, 0x43, 0x0e, 0x41, 0x80, 0x00, 0x09, 0x92, 0x8b, 0x93, 0x1c, 0x63, 0x04, + 0xc8, 0x29, 0x41, 0x72, 0xcb, 0x1e, 0x0c, 0x03, 0x0b, 0x78, 0x01, 0x61, 0xe1, 0x05, 0x7c, 0x5b, + 0x9f, 0x88, 0x35, 0x7d, 0x5a, 0xec, 0x3f, 0xb0, 0xd0, 0x61, 0x77, 0x51, 0xd5, 0xd5, 0xdf, 0xdd, + 0x9a, 0x26, 0x2d, 0x11, 0x8b, 0xc5, 0xde, 0x38, 0xf5, 0xde, 0xfb, 0xbd, 0x57, 0x55, 0xaf, 0xde, + 0x7b, 0x5d, 0xf5, 0x08, 0xcb, 0x7b, 0x77, 0xec, 0xaa, 0x6a, 0xd4, 0xf6, 0x7a, 0xdb, 0xc4, 0xd2, + 0x09, 0x25, 0x76, 0x6d, 0x9f, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x40, + 0x89, 0x6e, 0xab, 0x86, 0x6e, 0xd7, 0xf6, 0xaf, 0x6f, 0x13, 0xaa, 0x5c, 0xaf, 0x75, 0x88, 0x4e, + 0x2c, 0x85, 0x92, 0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x8b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, + 0xcf, 0x5e, 0x15, 0xec, 0xe7, 0xaf, 0x75, 0x54, 0xba, 0xdb, 0xdb, 0xae, 0xb6, 0x8c, 0x6e, 0xad, + 0x63, 0x74, 0x8c, 0x1a, 0x97, 0xda, 0xee, 0xed, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0x3b, + 0x2f, 0x07, 0x94, 0xb7, 0x0c, 0x8b, 0xd4, 0xf6, 0x63, 0x1a, 0xcf, 0xdf, 0xf4, 0x79, 0xba, 0x4a, + 0x6b, 0x57, 0xd5, 0x89, 0xd5, 0xaf, 0x99, 0x7b, 0x1d, 0x36, 
0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, + 0x52, 0xb5, 0x34, 0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xb7, 0x07, 0x09, 0xd8, 0xad, + 0x5d, 0xd2, 0x55, 0x62, 0x72, 0xaf, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, + 0x8a, 0x0a, 0xc9, 0x37, 0x61, 0x72, 0x41, 0xd3, 0x8c, 0x4f, 0x49, 0x7b, 0xb1, 0xb9, 0xb2, 0x64, + 0xa9, 0xfb, 0xc4, 0x42, 0x97, 0xa0, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0x2e, 0x49, 0x57, 0x46, 0xea, + 0x67, 0x1f, 0x1f, 0x56, 0xce, 0x1c, 0x1d, 0x56, 0x0a, 0xeb, 0x4a, 0x97, 0x60, 0x4e, 0x91, 0xef, + 0xc2, 0x94, 0x90, 0x5a, 0xd6, 0xc8, 0xc1, 0x43, 0x43, 0xeb, 0x75, 0x09, 0xba, 0x0c, 0xc3, 0x6d, + 0x0e, 0x20, 0x04, 0xc7, 0x85, 0xe0, 0xb0, 0x03, 0x8b, 0x05, 0x55, 0xb6, 0x61, 0x42, 0x08, 0x3f, + 0x30, 0x6c, 0xda, 0x50, 0xe8, 0x2e, 0xba, 0x01, 0x60, 0x2a, 0x74, 0xb7, 0x61, 0x91, 0x1d, 0xf5, + 0x40, 0x88, 0x23, 0x21, 0x0e, 0x0d, 0x8f, 0x82, 0x03, 0x5c, 0xe8, 0x2a, 0x94, 0x2c, 0xa2, 0xb4, + 0x37, 0x74, 0xad, 0x5f, 0xce, 0x5d, 0x92, 0xae, 0x94, 0xea, 0x93, 0x42, 0xa2, 0x84, 0xc5, 0x38, + 0xf6, 0x38, 0xe4, 0xcf, 0x72, 0x30, 0xb2, 0xa4, 0x90, 0xae, 0xa1, 0x37, 0x09, 0x45, 0x1f, 0x43, + 0x89, 0x6d, 0x57, 0x5b, 0xa1, 0x0a, 0xd7, 0x36, 0x7a, 0xe3, 0xb5, 0xaa, 0xef, 0x4e, 0xde, 0xea, + 0x55, 0xcd, 0xbd, 0x0e, 0x1b, 0xb0, 0xab, 0x8c, 0xbb, 0xba, 0x7f, 0xbd, 0xba, 0xb1, 0xfd, 0x88, + 0xb4, 0xe8, 0x1a, 0xa1, 0x8a, 0x6f, 0x9f, 0x3f, 0x86, 0x3d, 0x54, 0xb4, 0x0e, 0x05, 0xdb, 0x24, + 0x2d, 0x6e, 0xd9, 0xe8, 0x8d, 0xab, 0xd5, 0xa7, 0x3a, 0x6b, 0xd5, 0xb3, 0xac, 0x69, 0x92, 0x96, + 0xbf, 0xe2, 0xec, 0x17, 0xe6, 0x38, 0xe8, 0x21, 0x0c, 0xdb, 0x54, 0xa1, 0x3d, 0xbb, 0x9c, 0xe7, + 0x88, 0xd5, 0xcc, 0x88, 0x5c, 0xca, 0xdf, 0x0c, 0xe7, 0x37, 0x16, 0x68, 0xf2, 0x2f, 0x73, 0x80, + 0x3c, 0xde, 0x45, 0x43, 0x6f, 0xab, 0x54, 0x35, 0x74, 0xf4, 0x26, 0x14, 0x68, 0xdf, 0x74, 0x5d, + 0xe0, 0xb2, 0x6b, 0xd0, 0x66, 0xdf, 0x24, 0x4f, 0x0e, 0x2b, 0xb3, 0x71, 0x09, 0x46, 0xc1, 0x5c, + 0x06, 0xad, 0x7a, 0xa6, 0xe6, 0xb8, 0xf4, 0xcd, 0xb0, 0xea, 0x27, 0x87, 0x95, 0x84, 0xc3, 0x56, + 0xf5, 0x90, 0xc2, 0x06, 0xa2, 0x7d, 0x40, 0x9a, 0x62, 0xd3, 0x4d, 0x4b, 0xd1, 0x6d, 0x47, 0x93, + 0xda, 0x25, 0x62, 0x11, 0x5e, 0xcd, 0xb6, 0x69, 0x4c, 0xa2, 0x7e, 0x5e, 0x58, 0x81, 0x56, 0x63, + 0x68, 0x38, 0x41, 0x03, 0xf3, 0x66, 0x8b, 0x28, 0xb6, 0xa1, 0x97, 0x0b, 0x61, 0x6f, 0xc6, 0x7c, + 0x14, 0x0b, 0x2a, 0x7a, 0x05, 0x8a, 0x5d, 0x62, 0xdb, 0x4a, 0x87, 0x94, 0x87, 0x38, 0xe3, 0x84, + 0x60, 0x2c, 0xae, 0x39, 0xc3, 0xd8, 0xa5, 0xcb, 0x5f, 0x48, 0x30, 0xe6, 0xad, 0xdc, 0xaa, 0x6a, + 0x53, 0xf4, 0x57, 0x31, 0x3f, 0xac, 0x66, 0x9b, 0x12, 0x93, 0xe6, 0x5e, 0xe8, 0xf9, 0xbc, 0x3b, + 0x12, 0xf0, 0xc1, 0x35, 0x18, 0x52, 0x29, 0xe9, 0xb2, 0x7d, 0xc8, 0x5f, 0x19, 0xbd, 0x71, 0x25, + 0xab, 0xcb, 0xd4, 0xc7, 0x04, 0xe8, 0xd0, 0x0a, 0x13, 0xc7, 0x0e, 0x8a, 0xfc, 0xaf, 0x85, 0x80, + 0xf9, 0xcc, 0x35, 0xd1, 0x87, 0x50, 0xb2, 0x89, 0x46, 0x5a, 0xd4, 0xb0, 0x84, 0xf9, 0xaf, 0x67, + 0x34, 0x5f, 0xd9, 0x26, 0x5a, 0x53, 0x88, 0xd6, 0xcf, 0x32, 0xfb, 0xdd, 0x5f, 0xd8, 0x83, 0x44, + 0xef, 0x41, 0x89, 0x92, 0xae, 0xa9, 0x29, 0x94, 0x88, 0x73, 0xf4, 0x72, 0x70, 0x0a, 0xcc, 0x73, + 0x18, 0x58, 0xc3, 0x68, 0x6f, 0x0a, 0x36, 0x7e, 0x7c, 0xbc, 0x25, 0x71, 0x47, 0xb1, 0x07, 0x83, + 0xf6, 0x61, 0xbc, 0x67, 0xb6, 0x19, 0x27, 0x65, 0x51, 0xb0, 0xd3, 0x17, 0x9e, 0x74, 0x3b, 0xeb, + 0xda, 0x6c, 0x85, 0xa4, 0xeb, 0xb3, 0x42, 0xd7, 0x78, 0x78, 0x1c, 0x47, 0xb4, 0xa0, 0x05, 0x98, + 0xe8, 0xaa, 0x3a, 0x8b, 0x4b, 0xfd, 0x26, 0x69, 0x19, 0x7a, 0xdb, 0xe6, 0x6e, 0x35, 0x54, 0x9f, + 0x13, 0x00, 0x13, 0x6b, 0x61, 0x32, 0x8e, 0xf2, 0xa3, 0x77, 0x01, 0xb9, 0xd3, 0xb8, 
0xef, 0x04, + 0x71, 0xd5, 0xd0, 0xb9, 0xcf, 0xe5, 0x7d, 0xe7, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0xb4, 0x0a, + 0x33, 0x16, 0xd9, 0x57, 0xd9, 0x1c, 0x1f, 0xa8, 0x36, 0x35, 0xac, 0xfe, 0xaa, 0xda, 0x55, 0x69, + 0x79, 0x98, 0xdb, 0x54, 0x3e, 0x3a, 0xac, 0xcc, 0xe0, 0x04, 0x3a, 0x4e, 0x94, 0x92, 0xff, 0x6d, + 0x18, 0x26, 0x22, 0xf1, 0x06, 0x3d, 0x84, 0xd9, 0x56, 0xcf, 0xb2, 0x88, 0x4e, 0xd7, 0x7b, 0xdd, + 0x6d, 0x62, 0x35, 0x5b, 0xbb, 0xa4, 0xdd, 0xd3, 0x48, 0x9b, 0x3b, 0xca, 0x50, 0x7d, 0x5e, 0x58, + 0x3c, 0xbb, 0x98, 0xc8, 0x85, 0x53, 0xa4, 0xd9, 0x2a, 0xe8, 0x7c, 0x68, 0x4d, 0xb5, 0x6d, 0x0f, + 0x33, 0xc7, 0x31, 0xbd, 0x55, 0x58, 0x8f, 0x71, 0xe0, 0x04, 0x29, 0x66, 0x63, 0x9b, 0xd8, 0xaa, + 0x45, 0xda, 0x51, 0x1b, 0xf3, 0x61, 0x1b, 0x97, 0x12, 0xb9, 0x70, 0x8a, 0x34, 0xba, 0x05, 0xa3, + 0x8e, 0x36, 0xbe, 0x7f, 0x62, 0xa3, 0xa7, 0x05, 0xd8, 0xe8, 0xba, 0x4f, 0xc2, 0x41, 0x3e, 0x36, + 0x35, 0x63, 0xdb, 0x26, 0xd6, 0x3e, 0x69, 0xa7, 0x6f, 0xf0, 0x46, 0x8c, 0x03, 0x27, 0x48, 0xb1, + 0xa9, 0x39, 0x1e, 0x18, 0x9b, 0xda, 0x70, 0x78, 0x6a, 0x5b, 0x89, 0x5c, 0x38, 0x45, 0x9a, 0xf9, + 0xb1, 0x63, 0xf2, 0xc2, 0xbe, 0xa2, 0x6a, 0xca, 0xb6, 0x46, 0xca, 0xc5, 0xb0, 0x1f, 0xaf, 0x87, + 0xc9, 0x38, 0xca, 0x8f, 0xee, 0xc3, 0x94, 0x33, 0xb4, 0xa5, 0x2b, 0x1e, 0x48, 0x89, 0x83, 0xbc, + 0x20, 0x40, 0xa6, 0xd6, 0xa3, 0x0c, 0x38, 0x2e, 0x83, 0xde, 0x84, 0xf1, 0x96, 0xa1, 0x69, 0xdc, + 0x1f, 0x17, 0x8d, 0x9e, 0x4e, 0xcb, 0x23, 0x1c, 0x05, 0xb1, 0xf3, 0xb8, 0x18, 0xa2, 0xe0, 0x08, + 0x27, 0x22, 0x00, 0x2d, 0x37, 0xe1, 0xd8, 0x65, 0xe0, 0xf1, 0xf1, 0x7a, 0xd6, 0x18, 0xe0, 0xa5, + 0x2a, 0xbf, 0x06, 0xf0, 0x86, 0x6c, 0x1c, 0x00, 0x96, 0x7f, 0x2a, 0xc1, 0x5c, 0x4a, 0xe8, 0x40, + 0x6f, 0x87, 0x52, 0xec, 0x9f, 0x46, 0x52, 0xec, 0x85, 0x14, 0xb1, 0x40, 0x9e, 0xd5, 0x61, 0xcc, + 0x62, 0xb3, 0xd2, 0x3b, 0x0e, 0x8b, 0x88, 0x91, 0xb7, 0x06, 0x4c, 0x03, 0x07, 0x65, 0xfc, 0x98, + 0x3f, 0x75, 0x74, 0x58, 0x19, 0x0b, 0xd1, 0x70, 0x18, 0x5e, 0xfe, 0xf7, 0x1c, 0xc0, 0x12, 0x31, + 0x35, 0xa3, 0xdf, 0x25, 0xfa, 0x69, 0xd4, 0x50, 0x1b, 0xa1, 0x1a, 0xea, 0xda, 0xa0, 0xed, 0xf1, + 0x4c, 0x4b, 0x2d, 0xa2, 0xfe, 0x32, 0x52, 0x44, 0xd5, 0xb2, 0x43, 0x3e, 0xbd, 0x8a, 0xfa, 0x79, + 0x1e, 0xa6, 0x7d, 0x66, 0xbf, 0x8c, 0xba, 0x1b, 0xda, 0xe3, 0x3f, 0x89, 0xec, 0xf1, 0x5c, 0x82, + 0xc8, 0x73, 0xab, 0xa3, 0x9e, 0x7d, 0x3d, 0x83, 0x1e, 0xc1, 0x38, 0x2b, 0x9c, 0x1c, 0xf7, 0xe0, + 0x65, 0xd9, 0xf0, 0xb1, 0xcb, 0x32, 0x2f, 0x81, 0xae, 0x86, 0x90, 0x70, 0x04, 0x39, 0xa5, 0x0c, + 0x2c, 0x3e, 0xef, 0x32, 0x50, 0xfe, 0x52, 0x82, 0x71, 0x7f, 0x9b, 0x4e, 0xa1, 0x68, 0x5b, 0x0f, + 0x17, 0x6d, 0xaf, 0x64, 0x76, 0xd1, 0x94, 0xaa, 0xed, 0xd7, 0xac, 0xc0, 0xf7, 0x98, 0xd8, 0x01, + 0xdf, 0x56, 0x5a, 0x7b, 0x83, 0xbf, 0xf1, 0xd0, 0x67, 0x12, 0x20, 0x91, 0x05, 0x16, 0x74, 0xdd, + 0xa0, 0x8a, 0x13, 0x2b, 0x1d, 0xb3, 0x56, 0x32, 0x9b, 0xe5, 0x6a, 0xac, 0x6e, 0xc5, 0xb0, 0xee, + 0xe9, 0xd4, 0xea, 0xfb, 0x3b, 0x12, 0x67, 0xc0, 0x09, 0x06, 0x20, 0x05, 0xc0, 0x12, 0x98, 0x9b, + 0x86, 0x38, 0xc8, 0xd7, 0x32, 0xc4, 0x3c, 0x26, 0xb0, 0x68, 0xe8, 0x3b, 0x6a, 0xc7, 0x0f, 0x3b, + 0xd8, 0x03, 0xc2, 0x01, 0xd0, 0xf3, 0xf7, 0x60, 0x2e, 0xc5, 0x5a, 0x34, 0x09, 0xf9, 0x3d, 0xd2, + 0x77, 0x96, 0x0d, 0xb3, 0x3f, 0xd1, 0x0c, 0x0c, 0xed, 0x2b, 0x5a, 0xcf, 0x09, 0xbf, 0x23, 0xd8, + 0xf9, 0xf1, 0x66, 0xee, 0x8e, 0x24, 0x7f, 0x31, 0x14, 0xf4, 0x1d, 0x5e, 0x31, 0x5f, 0x61, 0x1f, + 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42, 0x67, 0x9d, 0x0f, 0x56, 0x67, 0x0c, 0x7b, 0xd4, + 0x50, 0x6d, 0x9d, 0x7b, 0xbe, 0xb5, 0x75, 0xfe, 0xd9, 0xd4, 0xd6, 0x7f, 0x0d, 0x25, 0xdb, 0xad, + 0xaa, 0x0b, 
0x1c, 0xf2, 0xfa, 0x31, 0xe2, 0xab, 0x28, 0xa8, 0x3d, 0x05, 0x5e, 0x29, 0xed, 0x81, + 0x26, 0x15, 0xd1, 0x43, 0xc7, 0x2c, 0xa2, 0x9f, 0x69, 0xe1, 0xcb, 0x62, 0xaa, 0xa9, 0xf4, 0x6c, + 0xd2, 0xe6, 0x81, 0xa8, 0xe4, 0xc7, 0xd4, 0x06, 0x1f, 0xc5, 0x82, 0x8a, 0x3e, 0x0c, 0xb9, 0x6c, + 0xe9, 0x24, 0x2e, 0x3b, 0x9e, 0xee, 0xae, 0x68, 0x0b, 0xe6, 0x4c, 0xcb, 0xe8, 0x58, 0xc4, 0xb6, + 0x97, 0x88, 0xd2, 0xd6, 0x54, 0x9d, 0xb8, 0xeb, 0xe3, 0x54, 0x44, 0x17, 0x8e, 0x0e, 0x2b, 0x73, + 0x8d, 0x64, 0x16, 0x9c, 0x26, 0x2b, 0x3f, 0x2e, 0xc0, 0x64, 0x34, 0x03, 0xa6, 0x14, 0xa9, 0xd2, + 0x89, 0x8a, 0xd4, 0xab, 0x81, 0xc3, 0xe0, 0x54, 0xf0, 0x81, 0x1b, 0x9c, 0xd8, 0x81, 0x58, 0x80, + 0x09, 0x11, 0x0d, 0x5c, 0xa2, 0x28, 0xd3, 0xbd, 0xdd, 0xdf, 0x0a, 0x93, 0x71, 0x94, 0x9f, 0x95, + 0x9e, 0x7e, 0x45, 0xe9, 0x82, 0x14, 0xc2, 0xa5, 0xe7, 0x42, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x1a, + 0x4c, 0xf7, 0xf4, 0x38, 0x94, 0xe3, 0x8d, 0x17, 0x04, 0xd4, 0xf4, 0x56, 0x9c, 0x05, 0x27, 0xc9, + 0xa1, 0x9d, 0x50, 0x35, 0x3a, 0xcc, 0x23, 0xec, 0x8d, 0xcc, 0x67, 0x27, 0x73, 0x39, 0x8a, 0xee, + 0xc2, 0x98, 0xc5, 0xbf, 0x3b, 0x5c, 0x83, 0x9d, 0xda, 0xfd, 0x9c, 0x10, 0x1b, 0xc3, 0x41, 0x22, + 0x0e, 0xf3, 0x26, 0x94, 0xdb, 0xa5, 0xac, 0xe5, 0xb6, 0xfc, 0x63, 0x29, 0x98, 0x84, 0xbc, 0x12, + 0x78, 0xd0, 0x2d, 0x53, 0x4c, 0x22, 0x50, 0x1d, 0x19, 0xc9, 0xd5, 0xef, 0xed, 0x63, 0x55, 0xbf, + 0x7e, 0xf2, 0x1c, 0x5c, 0xfe, 0x7e, 0x2e, 0xc1, 0xec, 0x72, 0xf3, 0xbe, 0x65, 0xf4, 0x4c, 0xd7, + 0x9c, 0x0d, 0xd3, 0x59, 0xd7, 0x37, 0xa0, 0x60, 0xf5, 0x34, 0x77, 0x1e, 0x2f, 0xbb, 0xf3, 0xc0, + 0x3d, 0x8d, 0xcd, 0x63, 0x3a, 0x22, 0xe5, 0x4c, 0x82, 0x09, 0xa0, 0x75, 0x18, 0xb6, 0x14, 0xbd, + 0x43, 0xdc, 0xb4, 0x7a, 0x79, 0x80, 0xf5, 0x2b, 0x4b, 0x98, 0xb1, 0x07, 0x8a, 0x37, 0x2e, 0x8d, + 0x05, 0x8a, 0xfc, 0x4f, 0x12, 0x4c, 0x3c, 0xd8, 0xdc, 0x6c, 0xac, 0xe8, 0xfc, 0x44, 0xf3, 0xbb, + 0xd5, 0x4b, 0x50, 0x30, 0x15, 0xba, 0x1b, 0xcd, 0xf4, 0x8c, 0x86, 0x39, 0x05, 0xbd, 0x0f, 0x45, + 0x16, 0x49, 0x88, 0xde, 0xce, 0x58, 0x6a, 0x0b, 0xf8, 0xba, 0x23, 0xe4, 0x57, 0x88, 0x62, 0x00, + 0xbb, 0x70, 0xf2, 0x1e, 0xcc, 0x04, 0xcc, 0x61, 0xeb, 0xf1, 0x90, 0x65, 0x47, 0xd4, 0x84, 0x21, + 0xa6, 0x99, 0xe5, 0xc0, 0x7c, 0x86, 0xcb, 0xcc, 0xc8, 0x94, 0xfc, 0x4a, 0x87, 0xfd, 0xb2, 0xb1, + 0x83, 0x25, 0xaf, 0xc1, 0x18, 0xbf, 0x50, 0x36, 0x2c, 0xca, 0x97, 0x05, 0x5d, 0x84, 0x7c, 0x57, + 0xd5, 0x45, 0x9e, 0x1d, 0x15, 0x32, 0x79, 0x96, 0x23, 0xd8, 0x38, 0x27, 0x2b, 0x07, 0x22, 0xf2, + 0xf8, 0x64, 0xe5, 0x00, 0xb3, 0x71, 0xf9, 0x3e, 0x14, 0xc5, 0x72, 0x07, 0x81, 0xf2, 0x4f, 0x07, + 0xca, 0x27, 0x00, 0x6d, 0x40, 0x71, 0xa5, 0x51, 0xd7, 0x0c, 0xa7, 0xea, 0x6a, 0xa9, 0x6d, 0x2b, + 0xba, 0x17, 0x8b, 0x2b, 0x4b, 0x18, 0x73, 0x0a, 0x92, 0x61, 0x98, 0x1c, 0xb4, 0x88, 0x49, 0xb9, + 0x47, 0x8c, 0xd4, 0x81, 0xed, 0xf2, 0x3d, 0x3e, 0x82, 0x05, 0x45, 0xfe, 0xe7, 0x1c, 0x14, 0xc5, + 0x72, 0x9c, 0xc2, 0x57, 0xd8, 0x6a, 0xe8, 0x2b, 0xec, 0xd5, 0x6c, 0xae, 0x91, 0xfa, 0x09, 0xb6, + 0x19, 0xf9, 0x04, 0xbb, 0x9a, 0x11, 0xef, 0xe9, 0xdf, 0x5f, 0xff, 0x27, 0xc1, 0x78, 0xd8, 0x29, + 0xd1, 0x2d, 0x18, 0x65, 0x09, 0x47, 0x6d, 0x91, 0x75, 0xbf, 0xce, 0xf5, 0x2e, 0x61, 0x9a, 0x3e, + 0x09, 0x07, 0xf9, 0x50, 0xc7, 0x13, 0x63, 0x7e, 0x24, 0x26, 0x9d, 0xbe, 0xa4, 0x3d, 0xaa, 0x6a, + 0x55, 0xe7, 0x69, 0xa5, 0xba, 0xa2, 0xd3, 0x0d, 0xab, 0x49, 0x2d, 0x55, 0xef, 0xc4, 0x14, 0x71, + 0xa7, 0x0c, 0x22, 0xcb, 0x3f, 0x92, 0x60, 0x54, 0x98, 0x7c, 0x0a, 0x5f, 0x15, 0x7f, 0x11, 0xfe, + 0xaa, 0xb8, 0x9c, 0xf1, 0x80, 0x27, 0x7f, 0x52, 0xfc, 0x97, 0x6f, 0x3a, 0x3b, 0xd2, 0xcc, 0xab, + 0x77, 0x0d, 0x9b, 0x46, 0xbd, 0x9a, 
0x1d, 0x46, 0xcc, 0x29, 0xa8, 0x07, 0x93, 0x6a, 0x24, 0x06, + 0x88, 0xa5, 0xad, 0x65, 0xb3, 0xc4, 0x13, 0xab, 0x97, 0x05, 0xfc, 0x64, 0x94, 0x82, 0x63, 0x2a, + 0x64, 0x02, 0x31, 0x2e, 0xf4, 0x1e, 0x14, 0x76, 0x29, 0x35, 0x13, 0xee, 0xab, 0x07, 0x44, 0x1e, + 0xdf, 0x84, 0x12, 0x9f, 0xdd, 0xe6, 0x66, 0x03, 0x73, 0x28, 0xf9, 0x37, 0xfe, 0x7a, 0x34, 0x1d, + 0x1f, 0xf7, 0xe2, 0xa9, 0x74, 0x92, 0x78, 0x3a, 0x9a, 0x14, 0x4b, 0xd1, 0x03, 0xc8, 0x53, 0x2d, + 0xeb, 0x67, 0xa1, 0x40, 0xdc, 0x5c, 0x6d, 0xfa, 0x01, 0x69, 0x73, 0xb5, 0x89, 0x19, 0x04, 0xda, + 0x80, 0x21, 0x96, 0x7d, 0xd8, 0x11, 0xcc, 0x67, 0x3f, 0xd2, 0x6c, 0xfe, 0xbe, 0x43, 0xb0, 0x5f, + 0x36, 0x76, 0x70, 0xe4, 0x4f, 0x60, 0x2c, 0x74, 0x4e, 0xd1, 0xc7, 0x70, 0x56, 0x33, 0x94, 0x76, + 0x5d, 0xd1, 0x14, 0xbd, 0x45, 0xdc, 0xc7, 0x81, 0xcb, 0x49, 0x5f, 0x18, 0xab, 0x01, 0x3e, 0x71, + 0xca, 0x67, 0x84, 0x92, 0xb3, 0x41, 0x1a, 0x0e, 0x21, 0xca, 0x0a, 0x80, 0x3f, 0x47, 0x54, 0x81, + 0x21, 0xe6, 0x67, 0x4e, 0x3e, 0x19, 0xa9, 0x8f, 0x30, 0x0b, 0x99, 0xfb, 0xd9, 0xd8, 0x19, 0x47, + 0x37, 0x00, 0x6c, 0xd2, 0xb2, 0x08, 0xe5, 0xc1, 0x20, 0x17, 0x7e, 0x60, 0x6c, 0x7a, 0x14, 0x1c, + 0xe0, 0x92, 0x7f, 0x22, 0xc1, 0xd8, 0x3a, 0xa1, 0x9f, 0x1a, 0xd6, 0x5e, 0xc3, 0xd0, 0xd4, 0x56, + 0xff, 0x14, 0x82, 0x2d, 0x0e, 0x05, 0xdb, 0xd7, 0x06, 0xec, 0x4c, 0xc8, 0xba, 0xb4, 0x90, 0x2b, + 0x7f, 0x29, 0xc1, 0x5c, 0x88, 0xf3, 0x9e, 0x7f, 0x74, 0xb7, 0x60, 0xc8, 0x34, 0x2c, 0xea, 0x26, + 0xe2, 0x63, 0x29, 0x64, 0x61, 0x2c, 0x90, 0x8a, 0x19, 0x0c, 0x76, 0xd0, 0xd0, 0x2a, 0xe4, 0xa8, + 0x21, 0x5c, 0xf5, 0x78, 0x98, 0x84, 0x58, 0x75, 0x10, 0x98, 0xb9, 0x4d, 0x03, 0xe7, 0xa8, 0xc1, + 0x36, 0xa2, 0x1c, 0xe2, 0x0a, 0x06, 0x9f, 0xe7, 0x34, 0x03, 0x0c, 0x85, 0x1d, 0xcb, 0xe8, 0x9e, + 0x78, 0x0e, 0xde, 0x46, 0x2c, 0x5b, 0x46, 0x17, 0x73, 0x2c, 0xf9, 0x2b, 0x09, 0xa6, 0x42, 0x9c, + 0xa7, 0x10, 0xf8, 0xdf, 0x0b, 0x07, 0xfe, 0xab, 0xc7, 0x99, 0x48, 0x4a, 0xf8, 0xff, 0x2a, 0x17, + 0x99, 0x06, 0x9b, 0x30, 0xda, 0x81, 0x51, 0xd3, 0x68, 0x37, 0x9f, 0xc1, 0x73, 0xe0, 0x04, 0xcb, + 0x9b, 0x0d, 0x1f, 0x0b, 0x07, 0x81, 0xd1, 0x01, 0x4c, 0xe9, 0x4a, 0x97, 0xd8, 0xa6, 0xd2, 0x22, + 0xcd, 0x67, 0x70, 0x41, 0x72, 0x8e, 0xbf, 0x37, 0x44, 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0xa2, + 0x6a, 0xf2, 0x3a, 0x4e, 0xd4, 0x2e, 0x03, 0xb3, 0xa8, 0x53, 0xf5, 0x39, 0xf1, 0x5c, 0xfc, 0xc0, + 0x2e, 0x86, 0xfc, 0xdf, 0x51, 0x6f, 0x60, 0xfe, 0x87, 0xee, 0x43, 0x89, 0x37, 0x66, 0xb4, 0x0c, + 0xcd, 0x7d, 0x19, 0x60, 0x3b, 0xdb, 0x10, 0x63, 0x4f, 0x0e, 0x2b, 0x17, 0x12, 0x2e, 0x7d, 0x5d, + 0x32, 0xf6, 0x84, 0xd1, 0x3a, 0x14, 0xcc, 0x1f, 0x52, 0xc1, 0xf0, 0x24, 0xc7, 0xcb, 0x16, 0x8e, + 0x23, 0xff, 0x5d, 0x3e, 0x62, 0x2e, 0x4f, 0x75, 0x8f, 0x9e, 0xd9, 0xae, 0x7b, 0x15, 0x53, 0xea, + 0xce, 0x6f, 0x43, 0x51, 0x64, 0x78, 0xe1, 0xcc, 0x6f, 0x1c, 0xc7, 0x99, 0x83, 0x59, 0xcc, 0xfb, + 0x60, 0x71, 0x07, 0x5d, 0x60, 0xf4, 0x11, 0x0c, 0x13, 0x47, 0x85, 0x93, 0x1b, 0x6f, 0x1f, 0x47, + 0x85, 0x1f, 0x57, 0xfd, 0x42, 0x55, 0x8c, 0x09, 0x54, 0xf4, 0x36, 0x5b, 0x2f, 0xc6, 0xcb, 0x3e, + 0x02, 0xed, 0x72, 0x81, 0xa7, 0xab, 0x8b, 0xce, 0xb4, 0xbd, 0xe1, 0x27, 0x87, 0x15, 0xf0, 0x7f, + 0xe2, 0xa0, 0x84, 0xfc, 0x33, 0x09, 0xa6, 0xf8, 0x0a, 0xb5, 0x7a, 0x96, 0x4a, 0xfb, 0xa7, 0x96, + 0x98, 0x1e, 0x86, 0x12, 0xd3, 0xcd, 0x01, 0xcb, 0x12, 0xb3, 0x30, 0x35, 0x39, 0x7d, 0x2d, 0xc1, + 0xb9, 0x18, 0xf7, 0x29, 0xc4, 0xc5, 0xad, 0x70, 0x5c, 0x7c, 0xed, 0xb8, 0x13, 0x4a, 0x89, 0x8d, + 0xbf, 0x9d, 0x4c, 0x98, 0x0e, 0x3f, 0x29, 0x37, 0x00, 0x4c, 0x4b, 0xdd, 0x57, 0x35, 0xd2, 0x11, + 0x8f, 0xe0, 0xa5, 0x40, 0x8b, 0x93, 0x47, 0xc1, 0x01, 0x2e, 
0x64, 0xc3, 0x6c, 0x9b, 0xec, 0x28, + 0x3d, 0x8d, 0x2e, 0xb4, 0xdb, 0x8b, 0x8a, 0xa9, 0x6c, 0xab, 0x9a, 0x4a, 0x55, 0x71, 0x5d, 0x30, + 0x52, 0xbf, 0xeb, 0x3c, 0x4e, 0x27, 0x71, 0x3c, 0x39, 0xac, 0x5c, 0x4c, 0x7a, 0x1d, 0x72, 0x59, + 0xfa, 0x38, 0x05, 0x1a, 0xf5, 0xa1, 0x6c, 0x91, 0x4f, 0x7a, 0xaa, 0x45, 0xda, 0x4b, 0x96, 0x61, + 0x86, 0xd4, 0xe6, 0xb9, 0xda, 0x3f, 0x3f, 0x3a, 0xac, 0x94, 0x71, 0x0a, 0xcf, 0x60, 0xc5, 0xa9, + 0xf0, 0xe8, 0x11, 0x4c, 0x2b, 0xa2, 0x19, 0x2d, 0xa8, 0xd5, 0x39, 0x25, 0x77, 0x8e, 0x0e, 0x2b, + 0xd3, 0x0b, 0x71, 0xf2, 0x60, 0x85, 0x49, 0xa0, 0xa8, 0x06, 0xc5, 0x7d, 0xde, 0xb7, 0x66, 0x97, + 0x87, 0x38, 0x3e, 0x4b, 0x04, 0x45, 0xa7, 0x95, 0x8d, 0x61, 0x0e, 0x2f, 0x37, 0xf9, 0xe9, 0x73, + 0xb9, 0xd8, 0x07, 0x25, 0xab, 0x25, 0xc5, 0x89, 0xe7, 0x37, 0xc6, 0x25, 0x3f, 0x6a, 0x3d, 0xf0, + 0x49, 0x38, 0xc8, 0x87, 0x3e, 0x84, 0x91, 0x5d, 0x71, 0x2b, 0x61, 0x97, 0x8b, 0x99, 0x92, 0x70, + 0xe8, 0x16, 0xa3, 0x3e, 0x25, 0x54, 0x8c, 0xb8, 0xc3, 0x36, 0xf6, 0x11, 0xd1, 0x2b, 0x50, 0xe4, + 0x3f, 0x56, 0x96, 0xf8, 0x75, 0x5c, 0xc9, 0x8f, 0x6d, 0x0f, 0x9c, 0x61, 0xec, 0xd2, 0x5d, 0xd6, + 0x95, 0xc6, 0x22, 0xbf, 0x16, 0x8e, 0xb0, 0xae, 0x34, 0x16, 0xb1, 0x4b, 0x47, 0x1f, 0x43, 0xd1, + 0x26, 0xab, 0xaa, 0xde, 0x3b, 0x28, 0x43, 0xa6, 0x47, 0xe5, 0xe6, 0x3d, 0xce, 0x1d, 0xb9, 0x18, + 0xf3, 0x35, 0x08, 0x3a, 0x76, 0x61, 0xd1, 0x2e, 0x8c, 0x58, 0x3d, 0x7d, 0xc1, 0xde, 0xb2, 0x89, + 0x55, 0x1e, 0xe5, 0x3a, 0x06, 0x85, 0x73, 0xec, 0xf2, 0x47, 0xb5, 0x78, 0x2b, 0xe4, 0x71, 0x60, + 0x1f, 0x1c, 0xfd, 0xa3, 0x04, 0xc8, 0xee, 0x99, 0xa6, 0x46, 0xba, 0x44, 0xa7, 0x8a, 0xc6, 0xef, + 0xe2, 0xec, 0xf2, 0x59, 0xae, 0xf3, 0x9d, 0x41, 0xf3, 0x8a, 0x09, 0x46, 0x95, 0x7b, 0x97, 0xde, + 0x71, 0x56, 0x9c, 0xa0, 0x97, 0x2d, 0xed, 0x8e, 0xcd, 0xff, 0x2e, 0x8f, 0x65, 0x5a, 0xda, 0xe4, + 0x3b, 0x47, 0x7f, 0x69, 0x05, 0x1d, 0xbb, 0xb0, 0xe8, 0x21, 0xcc, 0xba, 0x6d, 0x8f, 0xd8, 0x30, + 0xe8, 0xb2, 0xaa, 0x11, 0xbb, 0x6f, 0x53, 0xd2, 0x2d, 0x8f, 0xf3, 0x6d, 0xf7, 0x7a, 0x3f, 0x70, + 0x22, 0x17, 0x4e, 0x91, 0x46, 0x5d, 0xa8, 0xb8, 0x21, 0x83, 0x9d, 0x27, 0x2f, 0x66, 0xdd, 0xb3, + 0x5b, 0x8a, 0xe6, 0xbc, 0x03, 0x4c, 0x70, 0x05, 0x2f, 0x1f, 0x1d, 0x56, 0x2a, 0x4b, 0x4f, 0x67, + 0xc5, 0x83, 0xb0, 0xd0, 0xfb, 0x50, 0x56, 0xd2, 0xf4, 0x4c, 0x72, 0x3d, 0x2f, 0xb2, 0x38, 0x94, + 0xaa, 0x20, 0x55, 0x1a, 0x51, 0x98, 0x54, 0xc2, 0x0d, 0xa8, 0x76, 0x79, 0x2a, 0xd3, 0x45, 0x64, + 0xa4, 0x6f, 0xd5, 0xbf, 0x8c, 0x88, 0x10, 0x6c, 0x1c, 0xd3, 0x80, 0xfe, 0x06, 0x90, 0x12, 0xed, + 0x99, 0xb5, 0xcb, 0x28, 0x53, 0xfa, 0x89, 0x35, 0xdb, 0xfa, 0x6e, 0x17, 0x23, 0xd9, 0x38, 0x41, + 0x0f, 0x5a, 0x85, 0x19, 0x31, 0xba, 0xa5, 0xdb, 0xca, 0x0e, 0x69, 0xf6, 0xed, 0x16, 0xd5, 0xec, + 0xf2, 0x34, 0x8f, 0x7d, 0xfc, 0xe1, 0x6b, 0x21, 0x81, 0x8e, 0x13, 0xa5, 0xd0, 0x3b, 0x30, 0xb9, + 0x63, 0x58, 0xdb, 0x6a, 0xbb, 0x4d, 0x74, 0x17, 0x69, 0x86, 0x23, 0xcd, 0xb0, 0xd5, 0x58, 0x8e, + 0xd0, 0x70, 0x8c, 0x1b, 0xd9, 0x70, 0x4e, 0x20, 0x37, 0x2c, 0xa3, 0xb5, 0x66, 0xf4, 0x74, 0xea, + 0x94, 0x44, 0xe7, 0xbc, 0x14, 0x73, 0x6e, 0x21, 0x89, 0xe1, 0xc9, 0x61, 0xe5, 0x52, 0x72, 0x05, + 0xec, 0x33, 0xe1, 0x64, 0x6c, 0xb4, 0x0b, 0xc0, 0xe3, 0x82, 0x73, 0xfc, 0x66, 0xf9, 0xf1, 0xbb, + 0x93, 0x25, 0xea, 0x24, 0x9e, 0x40, 0xe7, 0x49, 0xce, 0x23, 0xe3, 0x00, 0x36, 0xfb, 0x4a, 0x51, + 0x22, 0x6d, 0xd5, 0x76, 0x79, 0x8e, 0xef, 0x75, 0x2d, 0xdb, 0x5e, 0x7b, 0x72, 0x81, 0xa7, 0xa9, + 0x28, 0x22, 0x8e, 0x2b, 0xe1, 0x5d, 0x3a, 0xe2, 0xcd, 0xe6, 0x74, 0x3a, 0x9d, 0x8f, 0xd7, 0xa5, + 0xe3, 0x9b, 0xf6, 0xcc, 0xba, 0x74, 0x02, 0x90, 0x4f, 0xbf, 0x25, 0xfe, 0x55, 0x0e, 
0xa6, 0x7d, + 0xe6, 0xcc, 0x5d, 0x3a, 0x09, 0x22, 0x7f, 0xec, 0x76, 0x1e, 0xdc, 0xed, 0xfc, 0xa5, 0x04, 0xe3, + 0xfe, 0xd2, 0xfd, 0xfe, 0x75, 0xce, 0xf8, 0xb6, 0xa5, 0xd4, 0xf2, 0xff, 0x9b, 0x0b, 0x4e, 0xe0, + 0x0f, 0xbe, 0x7d, 0xe3, 0x87, 0xb7, 0x28, 0xcb, 0x5f, 0xe7, 0x61, 0x32, 0x7a, 0x1a, 0x43, 0xaf, + 0xfc, 0xd2, 0xc0, 0x57, 0xfe, 0x06, 0xcc, 0xec, 0xf4, 0x34, 0xad, 0xcf, 0x97, 0x21, 0xf0, 0xd4, + 0xef, 0xbc, 0xd2, 0xbd, 0x28, 0x24, 0x67, 0x96, 0x13, 0x78, 0x70, 0xa2, 0x64, 0x4a, 0xc7, 0x42, + 0xfe, 0x44, 0x1d, 0x0b, 0xb1, 0x07, 0xf4, 0xc2, 0x31, 0x1e, 0xd0, 0x13, 0xbb, 0x0f, 0x86, 0x4e, + 0xd0, 0x7d, 0x70, 0x92, 0x76, 0x81, 0x84, 0x20, 0x36, 0xb0, 0x7b, 0xf5, 0x45, 0x38, 0x2f, 0xc4, + 0x28, 0x7f, 0xc9, 0xd7, 0xa9, 0x65, 0x68, 0x1a, 0xb1, 0x96, 0x7a, 0xdd, 0x6e, 0x5f, 0x7e, 0x0b, + 0xc6, 0xc3, 0x3d, 0x2a, 0xce, 0x4e, 0x3b, 0x6d, 0x32, 0xe2, 0xad, 0x34, 0xb0, 0xd3, 0xce, 0x38, + 0xf6, 0x38, 0xe4, 0xbf, 0x97, 0x60, 0x36, 0xb9, 0x17, 0x15, 0x69, 0x30, 0xde, 0x55, 0x0e, 0x82, + 0xfd, 0xc1, 0xd2, 0x09, 0x6f, 0xb1, 0x78, 0x73, 0xc2, 0x5a, 0x08, 0x0b, 0x47, 0xb0, 0xe5, 0xef, + 0x25, 0x98, 0x4b, 0x69, 0x0b, 0x38, 0x5d, 0x4b, 0xd0, 0x07, 0x50, 0xea, 0x2a, 0x07, 0xcd, 0x9e, + 0xd5, 0x21, 0x27, 0xbe, 0xb7, 0xe3, 0x11, 0x63, 0x4d, 0xa0, 0x60, 0x0f, 0x4f, 0xfe, 0x4f, 0x09, + 0x5e, 0x48, 0xad, 0x65, 0xd0, 0xed, 0x50, 0x07, 0x83, 0x1c, 0xe9, 0x60, 0x40, 0x71, 0xc1, 0xe7, + 0xd4, 0xc0, 0xf0, 0xb9, 0x04, 0xe5, 0xb4, 0xef, 0x3c, 0x74, 0x2b, 0x64, 0xe4, 0x4b, 0x11, 0x23, + 0xa7, 0x62, 0x72, 0xcf, 0xc9, 0xc6, 0xff, 0x91, 0x60, 0x36, 0xf9, 0x7b, 0x17, 0xbd, 0x1e, 0xb2, + 0xb0, 0x12, 0xb1, 0x70, 0x22, 0x22, 0x25, 0xec, 0xfb, 0x08, 0xc6, 0xc5, 0x57, 0xb1, 0x80, 0x11, + 0x7b, 0x2f, 0x27, 0x45, 0x74, 0x01, 0xe1, 0xd6, 0xa0, 0xdc, 0xab, 0xc2, 0x63, 0x38, 0x82, 0x26, + 0xff, 0x43, 0x0e, 0x86, 0x9a, 0x2d, 0x45, 0x23, 0xa7, 0x50, 0x0c, 0xbe, 0x1b, 0x2a, 0x06, 0x07, + 0xfd, 0xc7, 0x11, 0xb7, 0x2a, 0xb5, 0x0e, 0xc4, 0x91, 0x3a, 0xf0, 0xd5, 0x4c, 0x68, 0x4f, 0x2f, + 0x01, 0xff, 0x0c, 0x46, 0x3c, 0xa5, 0xc7, 0xcb, 0x4c, 0xf2, 0x7f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, + 0x98, 0x79, 0x6d, 0x27, 0x54, 0x0f, 0xe4, 0x33, 0x7c, 0x78, 0x04, 0x74, 0x55, 0xdd, 0x0a, 0xc0, + 0xe9, 0x98, 0xf5, 0x7b, 0x24, 0xe3, 0x85, 0xc1, 0x5b, 0x30, 0x4e, 0x15, 0xab, 0x43, 0xa8, 0x77, + 0x27, 0x9f, 0xe7, 0xbe, 0xe8, 0xf5, 0x59, 0x6f, 0x86, 0xa8, 0x38, 0xc2, 0x7d, 0xfe, 0x2e, 0x8c, + 0x85, 0x94, 0x1d, 0xab, 0xe1, 0xf5, 0xff, 0x25, 0x78, 0x69, 0xe0, 0x8d, 0x09, 0xaa, 0x87, 0x0e, + 0x49, 0x35, 0x72, 0x48, 0xe6, 0xd3, 0x01, 0x9e, 0x5f, 0xe3, 0x54, 0xfd, 0xda, 0xe3, 0xef, 0xe6, + 0xcf, 0x7c, 0xf3, 0xdd, 0xfc, 0x99, 0x6f, 0xbf, 0x9b, 0x3f, 0xf3, 0xb7, 0x47, 0xf3, 0xd2, 0xe3, + 0xa3, 0x79, 0xe9, 0x9b, 0xa3, 0x79, 0xe9, 0xdb, 0xa3, 0x79, 0xe9, 0x17, 0x47, 0xf3, 0xd2, 0xbf, + 0x7c, 0x3f, 0x7f, 0xe6, 0x83, 0xa2, 0x80, 0xfb, 0x5d, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xf0, + 0x40, 0xcd, 0xc4, 0x3c, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index efcda7ebde..55239b8dff 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -30,6 +30,12 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. 
+message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. // Deprecated: use AllowedFlexVolume from policy API Group instead. message AllowedFlexVolume { @@ -60,12 +66,12 @@ message AllowedHostPath { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -73,7 +79,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -102,7 +108,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -471,19 +477,20 @@ message IPBlock { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. +// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. message Ingress { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -500,7 +507,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -597,7 +604,7 @@ message IngressTLS { // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -652,7 +659,7 @@ message NetworkPolicyIngressRule { // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -732,7 +739,7 @@ message NetworkPolicySpec { repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -750,7 +757,7 @@ message NetworkPolicySpec { // Deprecated: use PodSecurityPolicy from policy API Group instead. message PodSecurityPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -763,7 +770,7 @@ message PodSecurityPolicy { // Deprecated: use PodSecurityPolicyList from policy API Group instead. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -864,6 +871,11 @@ message PodSecurityPolicySpec { // +optional repeated AllowedFlexVolume allowedFlexVolumes = 18; + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value means no CSI drivers can run inline within a pod spec. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -898,12 +910,12 @@ message PodSecurityPolicySpec { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -911,7 +923,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -940,7 +952,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1063,7 +1075,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -1106,15 +1118,15 @@ message SELinuxStrategyOptions { // represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. 
// +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 5ba6f95854..c34ea599d6 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -18,7 +18,7 @@ package v1beta1 import ( appsv1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -54,15 +54,15 @@ type ScaleStatus struct { // represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -229,7 +229,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. + // at any time during the update is at most 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -489,12 +489,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -502,7 +502,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -526,7 +526,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -541,20 +541,21 @@ type DaemonSetList struct { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. +// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -565,7 +566,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -716,12 +717,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -729,7 +730,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -740,7 +741,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -844,7 +845,7 @@ type ReplicaSetCondition struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -927,6 +928,10 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value means no CSI drivers can run inline within a pod spec. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -997,6 +1002,7 @@ var ( ConfigMap FSType = "configMap" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" + CSI FSType = "csi" All FSType = "*" ) @@ -1007,6 +1013,12 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. // Deprecated: use HostPortRange from policy API Group instead. @@ -1166,7 +1178,7 @@ const ( type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1181,7 +1193,7 @@ type PodSecurityPolicyList struct { type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1232,7 +1244,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -1351,7 +1363,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index bce6036cd5..654f34ae3d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", "driver": "driver is the name of the Flexvolume driver.", @@ -48,9 +57,9 @@ func (AllowedHostPath) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. 
Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -72,7 +81,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -270,10 +279,10 @@ func (IPBlock) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -292,7 +301,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -349,7 +358,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -379,7 +388,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -413,7 +422,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. 
Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -422,7 +431,7 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -432,7 +441,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -461,6 +470,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. 
Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", @@ -472,9 +482,9 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -496,7 +506,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -559,7 +569,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. 
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -598,9 +608,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 8128c079b4..a67968af4c 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { + if in == nil { + return nil + } + out := new(AllowedCSIDriver) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in @@ -1049,6 +1065,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index 887c366479..d3ffd5ed17 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index ab3731e9c3..cbbb265289 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -48,7 +48,7 @@ message IPBlock { // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -101,7 +101,7 @@ message NetworkPolicyIngressRule { // NetworkPolicyList is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -180,7 +180,7 @@ message NetworkPolicySpec { repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". 
// If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index ce70448d32..59331111f4 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -29,7 +29,7 @@ import ( type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -194,7 +194,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index f4363bc09e..cfcd0c54c5 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -39,7 +39,7 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -69,7 +69,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -103,7 +103,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. 
Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go new file mode 100644 index 0000000000..12d3d4ff06 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=networking.k8s.io + +package v1beta1 // import "k8s.io/api/networking/v1beta1" diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go new file mode 100644 index 0000000000..14430cbacb --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -0,0 +1,1953 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto + + It has these top-level messages: + HTTPIngressPath + HTTPIngressRuleValue + Ingress + IngressBackend + IngressList + IngressRule + IngressRuleValue + IngressSpec + IngressStatus + IngressTLS +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func init() { + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") +} +func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) + n1, err := m.Backend.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n1 + return i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, msg := range m.Paths { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n3, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n4, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) + n5, err := m.ServicePort.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *IngressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) + n7, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HTTP != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) + n8, err := m.HTTP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Backend != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) + n9, err := m.Backend.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if len(m.TLS) > 0 { + for _, msg := range m.TLS { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) + n10, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *IngressTLS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *HTTPIngressPath) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Ingress) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + var l int + _ = l + 
l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 812 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44, + 0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a, + 0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0, + 0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99, + 0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2, + 0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e, + 0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98, + 0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9, + 0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf, + 0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e, + 0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea, + 0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2, + 0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15, + 0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00, + 0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8, + 0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3, + 0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 
0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee, + 0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11, + 0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6, + 0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a, + 0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb, + 0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce, + 0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54, + 0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d, + 0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8, + 0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09, + 0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86, + 0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff, + 0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc, + 0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31, + 0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f, + 0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc, + 0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98, + 0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe, + 0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2, + 0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61, + 0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d, + 0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17, + 0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4, + 0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0, + 0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8, + 0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32, + 0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80, + 0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f, + 0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26, + 0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28, + 0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1, + 0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab, + 0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf, + 0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto new file mode 100644 index 0000000000..7df19138e2 --- /dev/null +++ 
b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -0,0 +1,186 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.networking.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +message HTTPIngressPath { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + optional string path = 1; + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + optional IngressBackend backend = 2; +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +message HTTPIngressRuleValue { + // A collection of paths that map requests to backends. + repeated HTTPIngressPath paths = 1; +} + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +message Ingress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional IngressSpec spec = 2; + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional IngressStatus status = 3; +} + +// IngressBackend describes all endpoints for a given service and port. +message IngressBackend { + // Specifies the name of the referenced service. + optional string serviceName = 1; + + // Specifies the port of the referenced service. 
+ optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; +} + +// IngressList is a collection of Ingress. +message IngressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Ingress. + repeated Ingress items = 2; +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +message IngressRule { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + optional string host = 1; + + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + optional IngressRuleValue ingressRuleValue = 2; +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +message IngressRuleValue { + // +optional + optional HTTPIngressRuleValue http = 1; +} + +// IngressSpec describes the Ingress the user wishes to exist. +message IngressSpec { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + optional IngressBackend backend = 1; + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + repeated IngressTLS tls = 2; + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + repeated IngressRule rules = 3; +} + +// IngressStatus describe the current state of the Ingress. +message IngressStatus { + // LoadBalancer contains the current status of the load-balancer. 
+ // +optional + optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; +} + +// IngressTLS describes the transport layer security associated with an Ingress. +message IngressTLS { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + repeated string hosts = 1; + + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + optional string secretName = 2; +} + diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go new file mode 100644 index 0000000000..c046c49012 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder holds functions that add things to a scheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types of this group into the given scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Ingress{}, + &IngressList{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go new file mode 100644 index 0000000000..63bf2d52a3 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Ingress. + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. 
+type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. 
+// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..9e05b7f1bb --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_HTTPIngressPath = map[string]string{ + "": "HTTPIngressPath associates a path regex with a backend. 
Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", +} + +func (HTTPIngressPath) SwaggerDoc() map[string]string { + return map_HTTPIngressPath +} + +var map_HTTPIngressRuleValue = map[string]string{ + "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", + "paths": "A collection of paths that map requests to backends.", +} + +func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { + return map_HTTPIngressRuleValue +} + +var map_Ingress = map[string]string{ + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressBackend = map[string]string{ + "": "IngressBackend describes all endpoints for a given service and port.", + "serviceName": "Specifies the name of the referenced service.", + "servicePort": "Specifies the port of the referenced service.", +} + +func (IngressBackend) SwaggerDoc() map[string]string { + return map_IngressBackend +} + +var map_IngressList = map[string]string{ + "": "IngressList is a collection of Ingress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of Ingress.", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressRule = map[string]string{ + "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. 
Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", +} + +func (IngressRule) SwaggerDoc() map[string]string { + return map_IngressRule +} + +var map_IngressRuleValue = map[string]string{ + "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", +} + +func (IngressRuleValue) SwaggerDoc() map[string]string { + return map_IngressRuleValue +} + +var map_IngressSpec = map[string]string{ + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "": "IngressStatus describe the current state of the Ingress.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_IngressTLS = map[string]string{ + "": "IngressTLS describes the transport layer security associated with an Ingress.", + "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", +} + +func (IngressTLS) SwaggerDoc() map[string]string { + return map_IngressTLS +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6342c985c8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,252 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { + *out = *in + out.Backend = in.Backend + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. +func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { + if in == nil { + return nil + } + out := new(HTTPIngressPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. +func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { + if in == nil { + return nil + } + out := new(HTTPIngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { + *out = *in + out.ServicePort = in.ServicePort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. +func (in *IngressBackend) DeepCopy() *IngressBackend { + if in == nil { + return nil + } + out := new(IngressBackend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
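For orientation only (not part of the vendored files above): a minimal sketch of how the networking/v1beta1 Ingress types added in this patch compose into a single object. The object and service names ("web", "example.com", "example-tls"), the package name, and the import aliases are illustrative placeholders; only fields defined in the vendored types are used.

package example

import (
	networkingv1beta1 "k8s.io/api/networking/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// exampleIngress builds an Ingress that terminates TLS for example.com and
// routes every path under "/" to the "web" service on port 80.
func exampleIngress() networkingv1beta1.Ingress {
	return networkingv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
		Spec: networkingv1beta1.IngressSpec{
			// TLS currently supports a single port, 443 (see IngressSpec.TLS).
			TLS: []networkingv1beta1.IngressTLS{
				{Hosts: []string{"example.com"}, SecretName: "example-tls"},
			},
			// One host rule; unmatched traffic would fall through to Spec.Backend if set.
			Rules: []networkingv1beta1.IngressRule{{
				Host: "example.com",
				IngressRuleValue: networkingv1beta1.IngressRuleValue{
					HTTP: &networkingv1beta1.HTTPIngressRuleValue{
						Paths: []networkingv1beta1.HTTPIngressPath{{
							Path: "/",
							Backend: networkingv1beta1.IngressBackend{
								ServiceName: "web",
								ServicePort: intstr.FromInt(80),
							},
						}},
					},
				},
			}},
		},
	}
}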
+func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRule) DeepCopyInto(out *IngressRule) { + *out = *in + in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. +func (in *IngressRule) DeepCopy() *IngressRule { + if in == nil { + return nil + } + out := new(IngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. +func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { + if in == nil { + return nil + } + out := new(IngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. +func (in *IngressTLS) DeepCopy() *IngressTLS { + if in == nil { + return nil + } + out := new(IngressTLS) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go new file mode 100644 index 0000000000..f7f8b78b42 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1alpha1 // import "k8s.io/api/node/v1alpha1" diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..16f5af9299 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -0,0 +1,696 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto + + It has these top-level messages: + RuntimeClass + RuntimeClassList + RuntimeClassSpec +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") + proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") +} +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i += copy(dAtA[i:], m.RuntimeHandler) + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RuntimeClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m 
*RuntimeClassSpec) Size() (n int) { + var l int + _ = l + l = len(m.RuntimeHandler) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 421 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x6b, 0xd4, 0x40, + 0x14, 0xc7, 0x33, 0xb5, 0x85, 0x75, 0x5a, 0x4b, 0xc9, 0x41, 0xc2, 0x1e, 0xa6, 0x65, 0x0f, 0x52, + 0x04, 0x67, 0xdc, 0x22, 0xe2, 0x49, 0x30, 0x5e, 0x14, 0x2b, 0x42, 0xbc, 0x89, 0x07, 0x27, 0xc9, + 0x33, 0x19, 0xb3, 0xc9, 0x0c, 0x99, 0x49, 0xc0, 0x9b, 0x1f, 0xc1, 0x2f, 0xa4, 0xe7, 0x3d, 0xf6, + 0xd8, 0x53, 0x71, 0xe3, 0x17, 0x91, 0x99, 0x64, 0xbb, 0xdb, 0x2e, 0xc5, 0xbd, 0xe5, 0xbd, 0xf9, + 0xff, 0x7f, 0xef, 0xfd, 0x5f, 0xf0, 
0xab, 0xe2, 0x85, 0xa6, 0x42, 0xb2, 0xa2, 0x89, 0xa1, 0xae, + 0xc0, 0x80, 0x66, 0x2d, 0x54, 0xa9, 0xac, 0xd9, 0xf0, 0xc0, 0x95, 0x60, 0x95, 0x4c, 0x81, 0xb5, + 0x53, 0x3e, 0x53, 0x39, 0x9f, 0xb2, 0x0c, 0x2a, 0xa8, 0xb9, 0x81, 0x94, 0xaa, 0x5a, 0x1a, 0xe9, + 0x07, 0xbd, 0x92, 0x72, 0x25, 0xa8, 0x55, 0xd2, 0xa5, 0x72, 0xfc, 0x24, 0x13, 0x26, 0x6f, 0x62, + 0x9a, 0xc8, 0x92, 0x65, 0x32, 0x93, 0xcc, 0x19, 0xe2, 0xe6, 0xab, 0xab, 0x5c, 0xe1, 0xbe, 0x7a, + 0xd0, 0xf8, 0xd9, 0x6a, 0x64, 0xc9, 0x93, 0x5c, 0x54, 0x50, 0x7f, 0x67, 0xaa, 0xc8, 0x6c, 0x43, + 0xb3, 0x12, 0x0c, 0x67, 0xed, 0xc6, 0xf8, 0x31, 0xbb, 0xcb, 0x55, 0x37, 0x95, 0x11, 0x25, 0x6c, + 0x18, 0x9e, 0xff, 0xcf, 0xa0, 0x93, 0x1c, 0x4a, 0x7e, 0xdb, 0x37, 0xf9, 0x8d, 0xf0, 0x41, 0xd4, + 0x4b, 0x5e, 0xcf, 0xb8, 0xd6, 0xfe, 0x17, 0x3c, 0xb2, 0x4b, 0xa5, 0xdc, 0xf0, 0x00, 0x9d, 0xa0, + 0xd3, 0xfd, 0xb3, 0xa7, 0x74, 0x75, 0x8b, 0x6b, 0x36, 0x55, 0x45, 0x66, 0x1b, 0x9a, 0x5a, 0x35, + 0x6d, 0xa7, 0xf4, 0x43, 0xfc, 0x0d, 0x12, 0xf3, 0x1e, 0x0c, 0x0f, 0xfd, 0xf9, 0xd5, 0xb1, 0xd7, + 0x5d, 0x1d, 0xe3, 0x55, 0x2f, 0xba, 0xa6, 0xfa, 0xe7, 0x78, 0x57, 0x2b, 0x48, 0x82, 0x1d, 0x47, + 0x7f, 0x4c, 0xef, 0xba, 0x34, 0x5d, 0xdf, 0xeb, 0xa3, 0x82, 0x24, 0x3c, 0x18, 0xb8, 0xbb, 0xb6, + 0x8a, 0x1c, 0x65, 0xf2, 0x0b, 0xe1, 0xa3, 0x75, 0xe1, 0xb9, 0xd0, 0xc6, 0xff, 0xbc, 0x11, 0x82, + 0x6e, 0x17, 0xc2, 0xba, 0x5d, 0x84, 0xa3, 0x61, 0xd4, 0x68, 0xd9, 0x59, 0x0b, 0xf0, 0x0e, 0xef, + 0x09, 0x03, 0xa5, 0x0e, 0x76, 0x4e, 0xee, 0x9d, 0xee, 0x9f, 0x3d, 0xda, 0x2e, 0x41, 0xf8, 0x60, + 0x40, 0xee, 0xbd, 0xb5, 0xe6, 0xa8, 0x67, 0x4c, 0xa2, 0x9b, 0xeb, 0xdb, 0x64, 0xfe, 0x4b, 0x7c, + 0x38, 0xfc, 0xb6, 0x37, 0xbc, 0x4a, 0x67, 0x50, 0xbb, 0x10, 0xf7, 0xc3, 0x87, 0x03, 0xe1, 0x30, + 0xba, 0xf1, 0x1a, 0xdd, 0x52, 0x87, 0x74, 0xbe, 0x20, 0xde, 0xc5, 0x82, 0x78, 0x97, 0x0b, 0xe2, + 0xfd, 0xe8, 0x08, 0x9a, 0x77, 0x04, 0x5d, 0x74, 0x04, 0x5d, 0x76, 0x04, 0xfd, 0xe9, 0x08, 0xfa, + 0xf9, 0x97, 0x78, 0x9f, 0x46, 0xcb, 0x35, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x94, 0x34, 0x0e, + 0xef, 0x30, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto new file mode 100644 index 0000000000..ca4e5e5350 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.node.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. 
RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + optional RuntimeClassSpec spec = 2; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. The RuntimeClassSpec is immutable. +message RuntimeClassSpec { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + optional string runtimeHandler = 1; +} + diff --git a/vendor/k8s.io/api/node/v1alpha1/register.go b/vendor/k8s.io/api/node/v1alpha1/register.go new file mode 100644 index 0000000000..b6082142a2 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go new file mode 100644 index 0000000000..2ce67c116f --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. The RuntimeClassSpec is immutable. 
+type RuntimeClassSpec struct { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..a51fa525df --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_RuntimeClassSpec = map[string]string{ + "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", +} + +func (RuntimeClassSpec) SwaggerDoc() map[string]string { + return map_RuntimeClassSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go similarity index 51% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go index 9f636b4840..0d63110b3b 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -25,55 +25,26 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]Rule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. -func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
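For orientation only (not part of the vendored files): a minimal sketch of a node/v1alpha1 RuntimeClass built from the types added above. The object name "native" and the package name are illustrative placeholders; "runc" follows the example handler name given in the RuntimeHandler field documentation.

package example

import (
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleRuntimeClass declares a RuntimeClass whose pods are handled by the
// CRI handler named "runc".
func exampleRuntimeClass() nodev1alpha1.RuntimeClass {
	return nodev1alpha1.RuntimeClass{
		// No namespace: the type is marked +genclient:nonNamespaced, i.e. cluster-scoped.
		ObjectMeta: metav1.ObjectMeta{Name: "native"},
		Spec: nodev1alpha1.RuntimeClassSpec{
			// RuntimeHandler must conform to the DNS Label (RFC 1123) requirements
			// and is immutable once set.
			RuntimeHandler: "runc",
		},
	}
}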
-func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = make([]Initializer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } + out.Spec = in.Spec return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. -func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { if in == nil { return nil } - out := new(InitializerConfiguration) + out := new(RuntimeClass) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { +func (in *RuntimeClass) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -81,13 +52,13 @@ func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]InitializerConfiguration, len(*in)) + *out = make([]RuntimeClass, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -95,18 +66,18 @@ func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurati return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. -func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { if in == nil { return nil } - out := new(InitializerConfigurationList) + out := new(RuntimeClassList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -114,32 +85,17 @@ func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Rule) DeepCopyInto(out *Rule) { +func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassSpec. +func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { if in == nil { return nil } - out := new(Rule) + out := new(RuntimeClassSpec) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go new file mode 100644 index 0000000000..0e8338cf7a --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1beta1 // import "k8s.io/api/node/v1beta1" diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go new file mode 100644 index 0000000000..27251a8a89 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -0,0 +1,564 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto + + It has these top-level messages: + RuntimeClass + RuntimeClassList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") +} +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i += copy(dAtA[i:], m.Handler) + return i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *RuntimeClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassList{`, + 
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 389 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6a, 0xdb, 0x40, + 0x14, 0x85, 0x35, 0x2e, 0xc6, 0xae, 0xdc, 0x52, 0xa3, 0x4d, 0x8d, 0x17, 0x63, 0x63, 0x28, 0xb8, + 0x0b, 0xcf, 0xd4, 0xa6, 0x94, 0x2e, 0x8b, 0xba, 0x69, 0x4b, 0x4b, 0x41, 0xcb, 0x90, 0x45, 0x46, + 0xd2, 0x8d, 0x34, 0x91, 0xa5, 0x11, 0x9a, 0x91, 0x20, 0xbb, 0x3c, 0x42, 0xf6, 0x79, 0x95, 0x3c, + 0x80, 0x97, 0x5e, 0x7a, 0x65, 0x62, 0xe5, 0x45, 0x82, 0x7e, 0xfc, 0x43, 0x8c, 0x49, 0x76, 0xba, + 0xe7, 0x9e, 0x73, 0xee, 0x87, 0x18, 0xfd, 0x47, 0xf0, 0x5d, 0x12, 0x2e, 0x68, 0x90, 0xda, 0x90, + 0x44, 0xa0, 0x40, 0xd2, 0x0c, 0x22, 0x57, 0x24, 0xb4, 0x5e, 0xb0, 0x98, 0xd3, 0x48, 0xb8, 0x40, + 0xb3, 0xa9, 0x0d, 0x8a, 0x4d, 0xa9, 0x07, 0x11, 0x24, 0x4c, 0x81, 0x4b, 0xe2, 0x44, 0x28, 0x61, + 0x7c, 0xac, 0x8c, 0x84, 0xc5, 0x9c, 0x14, 0x46, 0x52, 0x1b, 0xfb, 0x13, 0x8f, 0x2b, 0x3f, 0xb5, + 0x89, 0x23, 0x42, 0xea, 0x09, 0x4f, 0xd0, 0xd2, 0x6f, 0xa7, 0x97, 0xe5, 0x54, 0x0e, 0xe5, 0x57, + 0xd5, 0xd3, 0xff, 0xba, 0x3f, 0x18, 0x32, 0xc7, 0xe7, 0x11, 0x24, 0xd7, 0x34, 0x0e, 0xbc, 0x42, + 0x90, 0x34, 0x04, 0xc5, 0x68, 0x76, 0x74, 0xbd, 0x4f, 0x4f, 0xa5, 0x92, 0x34, 0x52, 0x3c, 0x84, + 0xa3, 0xc0, 0xb7, 0x97, 0x02, 0xd2, 0xf1, 0x21, 0x64, 0xcf, 0x73, 0xa3, 0x3b, 0xa4, 0xbf, 0xb3, + 0x2a, 0xcb, 0xcf, 0x39, 0x93, 0xd2, 0xb8, 0xd0, 0xdb, 0x05, 0x94, 0xcb, 0x14, 0xeb, 0xa1, 0x21, + 0x1a, 0x77, 0x66, 0x5f, 0xc8, 0xfe, 0x57, 0xec, 0xba, 0x49, 0x1c, 0x78, 0x85, 0x20, 0x49, 0xe1, + 0x26, 0xd9, 0x94, 0xfc, 0xb7, 0xaf, 0xc0, 0x51, 0xff, 0x40, 0x31, 0xd3, 0x58, 0xac, 0x07, 0x5a, + 0xbe, 0x1e, 0xe8, 0x7b, 0xcd, 0xda, 0xb5, 0x1a, 0x9f, 0xf5, 0x96, 0xcf, 0x22, 0x77, 0x0e, 0x49, + 0xaf, 0x31, 0x44, 0xe3, 0xb7, 0xe6, 0x87, 0xda, 0xde, 0xfa, 0x55, 0xc9, 0xd6, 0x76, 0x3f, 0xba, + 0x47, 0x7a, 0xf7, 0x90, 0xee, 0x2f, 0x97, 0xca, 0x38, 0x3f, 0x22, 0x24, 0xaf, 0x23, 0x2c, 0xd2, + 0x25, 0x5f, 0xb7, 0x3e, 0xd8, 0xde, 0x2a, 0x07, 0x74, 0x7f, 0xf4, 0x26, 0x57, 0x10, 0xca, 0x5e, + 0x63, 0xf8, 0x66, 0xdc, 0x99, 0x7d, 0x22, 0x27, 0xde, 0x01, 0x39, 0xe4, 0x32, 0xdf, 0xd7, 0x8d, + 0xcd, 0xdf, 0x45, 0xd6, 0xaa, 0x2a, 0xcc, 0xc9, 0x62, 0x83, 0xb5, 0xe5, 0x06, 0x6b, 0xab, 0x0d, + 0xd6, 0x6e, 0x72, 0x8c, 0x16, 0x39, 0x46, 0xcb, 0x1c, 0xa3, 0x55, 0x8e, 0xd1, 0x43, 0x8e, 0xd1, + 0xed, 0x23, 0xd6, 0xce, 0x5a, 0x75, 0xe3, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x68, 0xe5, + 0x0d, 0xb5, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto new file mode 100644 index 0000000000..9082fbd334 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.api.node.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + optional string handler = 2; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + diff --git a/vendor/k8s.io/api/node/v1beta1/register.go b/vendor/k8s.io/api/node/v1beta1/register.go new file mode 100644 index 0000000000..3c3b61ba40 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go new file mode 100644 index 0000000000..993c6e5066 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. 
+ // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..8bfa304e78 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..98b5d480af --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { + if in == nil { + return nil + } + out := new(RuntimeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuntimeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { + if in == nil { + return nil + } + out := new(RuntimeClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RuntimeClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 74611c6ba5..05d8332f80 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index d122fcfda1..deeeac6961 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -24,6 +24,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto It has these top-level messages: + AllowedCSIDriver AllowedFlexVolume AllowedHostPath Eviction @@ -71,83 +72,88 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *PodDisruptionBudgetList) Reset() { *m = 
PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{10} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } func (*RunAsGroupStrategyOptions) ProtoMessage() {} func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptorGenerated, []int{14} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptorGenerated, []int{15} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptorGenerated, []int{17} } func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") @@ -166,6 +172,28 @@ func init() { 
proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} + func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -872,6 +900,20 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { } i += n18 } + if len(m.AllowedCSIDrivers) > 0 { + for _, msg := range m.AllowedCSIDrivers { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1018,6 +1060,14 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AllowedCSIDriver) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l @@ -1251,6 +1301,12 @@ func (m *PodSecurityPolicySpec) Size() (n int) { l = m.RunAsGroup.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1321,6 +1377,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AllowedCSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedCSIDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" @@ -1495,6 +1561,7 @@ func (this *PodSecurityPolicySpec) String() string { `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, `RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedCSIDrivers), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1551,6 +1618,85 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3637,6 +3783,37 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4210,115 +4387,119 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1756 bytes of a gzipped FileDescriptorProto + // 1809 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x5e, 0x5a, 0xfb, 0xa3, 0x9d, 0xfd, 0x9f, 0xfd, 0x29, 0xbd, 0xa8, 0x45, 0x47, 0x01, 0x0a, - 0x37, 0x48, 0xa8, 0x78, 0x9d, 0xa4, 0x46, 0xd3, 0x16, 0x59, 0x5a, 0xbb, 0xf6, 0x06, 0xde, 0xae, - 0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, 0x2a, - 0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, 0x02, - 0x45, 0x91, 0xcb, 0xa0, 0x17, 0x42, 0xad, 0x22, 0x2f, 0xe1, 0xab, 0x80, 0xa3, 0x21, 0x25, 0xfe, - 0x49, 0x5e, 0x03, 0xf6, 0x1d, 0x39, 0xe7, 0xfb, 0xbe, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x43, 0x02, - 0xeb, 0xf2, 0x3e, 0x37, 0xa9, 0x57, 0xbb, 0x0c, 0x5a, 0x84, 0xb9, 0x44, 0x10, 0x5e, 0xeb, 0x11, - 0xd7, 0xf6, 0x58, 0x4d, 0x19, 0xb0, 0x4f, 0x6b, 0xbe, 0xe7, 0xd0, 0x76, 0xbf, 0xd6, 0xbb, 0xdb, - 0x22, 0x02, 0xdf, 0xad, 0x75, 0x88, 0x4b, 0x18, 0x16, 0xc4, 0x36, 0x7d, 0xe6, 0x09, 0x0f, 0xde, - 0x1c, 0x41, 0x4d, 0xec, 0x53, 0x73, 0x04, 0x35, 0x15, 0x74, 0xff, 0x83, 0x0e, 0x15, 0x17, 0x41, - 0xcb, 0x6c, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc9, 0x68, 0x05, 0xe7, 0xf2, 0x4d, 0xbe, - 0xc8, 0xa7, 0x91, 0xd2, 0x7e, 0x75, 0xc2, 0x69, 0xdb, 0x63, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, 0xff, - 0x68, 0x8c, 0xe9, 0xe2, 
0xf6, 0x05, 0x75, 0x09, 0xeb, 0xd7, 0xfc, 0xcb, 0x4e, 0x38, 0xc0, 0x6b, - 0x5d, 0x22, 0x70, 0x1e, 0xab, 0x56, 0xc4, 0x62, 0x81, 0x2b, 0x68, 0x97, 0x64, 0x08, 0x9f, 0xcc, - 0x22, 0xf0, 0xf6, 0x05, 0xe9, 0xe2, 0x0c, 0xef, 0x5e, 0x11, 0x2f, 0x10, 0xd4, 0xa9, 0x51, 0x57, - 0x70, 0xc1, 0xd2, 0xa4, 0xea, 0xa7, 0x60, 0xeb, 0xd0, 0x71, 0xbc, 0xaf, 0x88, 0x7d, 0xec, 0x90, - 0xab, 0x2f, 0x3c, 0x27, 0xe8, 0x12, 0xf8, 0x23, 0xb0, 0x68, 0x33, 0xda, 0x23, 0x4c, 0xd7, 0x6e, - 0x6b, 0x77, 0x96, 0xad, 0xf5, 0xe7, 0x03, 0x63, 0x6e, 0x38, 0x30, 0x16, 0xeb, 0x72, 0x14, 0x29, - 0x6b, 0x95, 0x83, 0x0d, 0x45, 0x7e, 0xe4, 0x71, 0xd1, 0xc0, 0xe2, 0x02, 0x1e, 0x00, 0xe0, 0x63, - 0x71, 0xd1, 0x60, 0xe4, 0x9c, 0x5e, 0x29, 0x3a, 0x54, 0x74, 0xd0, 0x88, 0x2d, 0x68, 0x02, 0x05, - 0xdf, 0x07, 0x65, 0x46, 0xb0, 0x7d, 0xe6, 0x3a, 0x7d, 0xfd, 0xc6, 0x6d, 0xed, 0x4e, 0xd9, 0xda, - 0x54, 0x8c, 0x32, 0x52, 0xe3, 0x28, 0x46, 0x54, 0xff, 0xab, 0x81, 0xf2, 0x51, 0x8f, 0xb6, 0x05, - 0xf5, 0x5c, 0xf8, 0x07, 0x50, 0x0e, 0xf3, 0x6e, 0x63, 0x81, 0xa5, 0xb3, 0x95, 0x83, 0x0f, 0xcd, - 0x71, 0x4d, 0xc4, 0x69, 0x30, 0xfd, 0xcb, 0x4e, 0x38, 0xc0, 0xcd, 0x10, 0x6d, 0xf6, 0xee, 0x9a, - 0x67, 0xad, 0x2f, 0x49, 0x5b, 0x9c, 0x12, 0x81, 0xc7, 0xe1, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, - 0xb0, 0x66, 0x13, 0x87, 0x08, 0x72, 0xe6, 0x87, 0x1e, 0xb9, 0x8c, 0x70, 0xe5, 0xe0, 0xde, 0xab, - 0xb9, 0xa9, 0x4f, 0x52, 0xad, 0xad, 0xe1, 0xc0, 0x58, 0x4b, 0x0c, 0xa1, 0xa4, 0x78, 0xf5, 0x6b, - 0x0d, 0xec, 0x1d, 0x37, 0x1f, 0x32, 0x2f, 0xf0, 0x9b, 0x22, 0x5c, 0xa7, 0x4e, 0x5f, 0x99, 0xe0, - 0x4f, 0xc0, 0x3c, 0x0b, 0x1c, 0xa2, 0x72, 0xfa, 0xae, 0x0a, 0x7a, 0x1e, 0x05, 0x0e, 0x79, 0x39, - 0x30, 0xb6, 0x53, 0xac, 0x27, 0x7d, 0x9f, 0x20, 0x49, 0x80, 0x9f, 0x83, 0x45, 0x86, 0xdd, 0x0e, - 0x09, 0x43, 0x2f, 0xdd, 0x59, 0x39, 0xa8, 0x9a, 0x85, 0xbb, 0xc6, 0x3c, 0xa9, 0xa3, 0x10, 0x3a, - 0x5e, 0x71, 0xf9, 0xca, 0x91, 0x52, 0xa8, 0x9e, 0x82, 0x35, 0xb9, 0xd4, 0x1e, 0x13, 0xd2, 0x02, - 0x6f, 0x81, 0x52, 0x97, 0xba, 0x32, 0xa8, 0x05, 0x6b, 0x45, 0xb1, 0x4a, 0xa7, 0xd4, 0x45, 0xe1, - 0xb8, 0x34, 0xe3, 0x2b, 0x99, 0xb3, 0x49, 0x33, 0xbe, 0x42, 0xe1, 0x78, 0xf5, 0x21, 0x58, 0x52, - 0x1e, 0x27, 0x85, 0x4a, 0xd3, 0x85, 0x4a, 0x39, 0x42, 0x7f, 0xbf, 0x01, 0xb6, 0x1b, 0x9e, 0x5d, - 0xa7, 0x9c, 0x05, 0x32, 0x5f, 0x56, 0x60, 0x77, 0x88, 0x78, 0x0b, 0xf5, 0xf1, 0x04, 0xcc, 0x73, - 0x9f, 0xb4, 0x55, 0x59, 0x1c, 0x4c, 0xc9, 0x6d, 0x4e, 0x7c, 0x4d, 0x9f, 0xb4, 0xad, 0xd5, 0x68, - 0x29, 0xc3, 0x37, 0x24, 0xd5, 0xe0, 0x33, 0xb0, 0xc8, 0x05, 0x16, 0x01, 0xd7, 0x4b, 0x52, 0xf7, - 0xa3, 0x6b, 0xea, 0x4a, 0xee, 0x78, 0x15, 0x47, 0xef, 0x48, 0x69, 0x56, 0xff, 0xad, 0x81, 0x1f, - 0xe4, 0xb0, 0x1e, 0x53, 0x2e, 0xe0, 0xb3, 0x4c, 0xc6, 0xcc, 0x57, 0xcb, 0x58, 0xc8, 0x96, 0xf9, - 0x8a, 0x37, 0x6f, 0x34, 0x32, 0x91, 0xad, 0x26, 0x58, 0xa0, 0x82, 0x74, 0xa3, 0x52, 0x34, 0xaf, - 0x37, 0x2d, 0x6b, 0x4d, 0x49, 0x2f, 0x9c, 0x84, 0x22, 0x68, 0xa4, 0x55, 0xfd, 0xcf, 0x8d, 0xdc, - 0xe9, 0x84, 0xe9, 0x84, 0xe7, 0x60, 0xb5, 0x4b, 0xdd, 0xc3, 0x1e, 0xa6, 0x0e, 0x6e, 0xa9, 0xdd, - 0x33, 0xad, 0x08, 0xc2, 0x5e, 0x69, 0x8e, 0x7a, 0xa5, 0x79, 0xe2, 0x8a, 0x33, 0xd6, 0x14, 0x8c, - 0xba, 0x1d, 0x6b, 0x73, 0x38, 0x30, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x0e, 0x94, - 0x39, 0x71, 0x48, 0x5b, 0x78, 0xec, 0x7a, 0x1d, 0xe2, 0x31, 0x6e, 0x11, 0xa7, 0xa9, 0xa8, 0xd6, - 0x6a, 0x98, 0xb7, 0xe8, 0x0d, 0xc5, 0x92, 0xd0, 0x01, 0xeb, 0x5d, 0x7c, 0xf5, 0xd4, 0xc5, 0xf1, - 0x44, 0x4a, 0xaf, 0x39, 0x11, 0x38, 0x1c, 0x18, 0xeb, 0xa7, 0x09, 0x2d, 0x94, 0xd2, 0xae, 0x7e, - 0x37, 0x0f, 0x6e, 0x16, 0x56, 0x15, 0xfc, 0x1c, 
0x40, 0xaf, 0xc5, 0x09, 0xeb, 0x11, 0xfb, 0xe1, - 0xe8, 0x34, 0xa1, 0x5e, 0xb4, 0x71, 0xf7, 0xd5, 0x02, 0xc1, 0xb3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, - 0x9f, 0x35, 0xb0, 0x66, 0x8f, 0xdc, 0x10, 0xbb, 0xe1, 0xd9, 0x51, 0x61, 0x3c, 0x7c, 0x9d, 0x7a, - 0x37, 0xeb, 0x93, 0x4a, 0x47, 0xae, 0x60, 0x7d, 0x6b, 0x57, 0x05, 0xb4, 0x96, 0xb0, 0xa1, 0xa4, - 0x53, 0x78, 0x0a, 0xa0, 0x1d, 0x4b, 0x72, 0x75, 0xa6, 0xc9, 0x14, 0x2f, 0x58, 0xb7, 0x94, 0xc2, - 0x6e, 0xc2, 0x6f, 0x04, 0x42, 0x39, 0x44, 0xf8, 0x0b, 0xb0, 0xde, 0x0e, 0x18, 0x23, 0xae, 0x78, - 0x44, 0xb0, 0x23, 0x2e, 0xfa, 0xfa, 0xbc, 0x94, 0xda, 0x53, 0x52, 0xeb, 0x0f, 0x12, 0x56, 0x94, - 0x42, 0x87, 0x7c, 0x9b, 0x70, 0xca, 0x88, 0x1d, 0xf1, 0x17, 0x92, 0xfc, 0x7a, 0xc2, 0x8a, 0x52, - 0x68, 0x78, 0x1f, 0xac, 0x92, 0x2b, 0x9f, 0xb4, 0xa3, 0x9c, 0x2e, 0x4a, 0xf6, 0x8e, 0x62, 0xaf, - 0x1e, 0x4d, 0xd8, 0x50, 0x02, 0xb9, 0xef, 0x00, 0x98, 0x4d, 0x22, 0xdc, 0x04, 0xa5, 0x4b, 0xd2, - 0x1f, 0x9d, 0x3c, 0x28, 0x7c, 0x84, 0x9f, 0x81, 0x85, 0x1e, 0x76, 0x02, 0xa2, 0x6a, 0xfd, 0xbd, - 0x57, 0xab, 0xf5, 0x27, 0xb4, 0x4b, 0xd0, 0x88, 0xf8, 0xd3, 0x1b, 0xf7, 0xb5, 0xea, 0xbf, 0x34, - 0xb0, 0xd5, 0xf0, 0xec, 0x26, 0x69, 0x07, 0x8c, 0x8a, 0x7e, 0x43, 0xae, 0xf3, 0x5b, 0xe8, 0xd9, - 0x28, 0xd1, 0xb3, 0x3f, 0x9c, 0x5e, 0x6b, 0xc9, 0xe8, 0x8a, 0x3a, 0x76, 0xf5, 0xb9, 0x06, 0x76, - 0x33, 0xe8, 0xb7, 0xd0, 0x51, 0x7f, 0x95, 0xec, 0xa8, 0xef, 0x5f, 0x67, 0x32, 0x05, 0xfd, 0xf4, - 0xbb, 0x8d, 0x9c, 0xa9, 0xc8, 0x6e, 0x1a, 0xde, 0xee, 0x18, 0xed, 0x51, 0x87, 0x74, 0x88, 0x2d, - 0x27, 0x53, 0x9e, 0xb8, 0xdd, 0xc5, 0x16, 0x34, 0x81, 0x82, 0x1c, 0xec, 0xd9, 0xe4, 0x1c, 0x07, - 0x8e, 0x38, 0xb4, 0xed, 0x07, 0xd8, 0xc7, 0x2d, 0xea, 0x50, 0x41, 0xd5, 0x75, 0x64, 0xd9, 0xfa, - 0x74, 0x38, 0x30, 0xf6, 0xea, 0xb9, 0x88, 0x97, 0x03, 0xe3, 0x56, 0xf6, 0x5e, 0x6e, 0xc6, 0x90, - 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0xa0, 0x33, 0xf2, 0xc7, 0x20, 0xdc, 0x14, 0x75, 0xe6, 0xf9, 0x09, - 0xb7, 0x25, 0xe9, 0xf6, 0xe7, 0xc3, 0x81, 0xa1, 0xa3, 0x02, 0xcc, 0x6c, 0xc7, 0x85, 0xf2, 0xf0, - 0x4b, 0xb0, 0x8d, 0x47, 0x7d, 0x20, 0xe1, 0x75, 0x5e, 0x7a, 0xbd, 0x3f, 0x1c, 0x18, 0xdb, 0x87, - 0x59, 0xf3, 0x6c, 0x87, 0x79, 0xa2, 0xb0, 0x06, 0x96, 0x7a, 0xf2, 0xca, 0xce, 0xf5, 0x05, 0xa9, - 0xbf, 0x3b, 0x1c, 0x18, 0x4b, 0xa3, 0x5b, 0x7c, 0xa8, 0xb9, 0x78, 0xdc, 0x94, 0x17, 0xc1, 0x08, - 0x05, 0x3f, 0x06, 0x2b, 0x17, 0x1e, 0x17, 0xbf, 0x24, 0xe2, 0x2b, 0x8f, 0x5d, 0xca, 0xc6, 0x50, - 0xb6, 0xb6, 0xd5, 0x0a, 0xae, 0x3c, 0x1a, 0x9b, 0xd0, 0x24, 0x0e, 0xfe, 0x06, 0x2c, 0x5f, 0xa8, - 0x6b, 0x1f, 0xd7, 0x97, 0x64, 0xa1, 0xdd, 0x99, 0x52, 0x68, 0x89, 0x2b, 0xa2, 0xb5, 0xa5, 0xe4, - 0x97, 0xa3, 0x61, 0x8e, 0xc6, 0x6a, 0xf0, 0xc7, 0x60, 0x49, 0xbe, 0x9c, 0xd4, 0xf5, 0xb2, 0x8c, - 0x66, 0x43, 0xc1, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, 0x27, 0x8d, 0x07, 0xfa, 0x72, - 0x16, 0x7a, 0xd2, 0x78, 0x80, 0x22, 0x3b, 0x7c, 0x06, 0x96, 0x38, 0x79, 0x4c, 0xdd, 0xe0, 0x4a, - 0x07, 0x72, 0xcb, 0xdd, 0x9d, 0x12, 0x6e, 0xf3, 0x48, 0x22, 0x53, 0x17, 0xee, 0xb1, 0xba, 0xb2, - 0xa3, 0x48, 0x12, 0xda, 0x60, 0x99, 0x05, 0xee, 0x21, 0x7f, 0xca, 0x09, 0xd3, 0x57, 0x32, 0xa7, - 0x7d, 0x5a, 0x1f, 0x45, 0xd8, 0xb4, 0x87, 0x38, 0x33, 0x31, 0x02, 0x8d, 0x85, 0xe1, 0x5f, 0x34, - 0x00, 0x79, 0xe0, 0xfb, 0x0e, 0xe9, 0x12, 0x57, 0x60, 0x47, 0xde, 0xef, 0xb9, 0xbe, 0x2a, 0xfd, - 0xfd, 0x6c, 0xda, 0x7c, 0x32, 0xa4, 0xb4, 0xe3, 0xf8, 0x98, 0xce, 0x42, 0x51, 0x8e, 0xcf, 0x30, - 0x9d, 0xe7, 0x5c, 0x3e, 0xeb, 0x6b, 0x33, 0xd3, 0x99, 0xff, 0xfd, 0x32, 0x4e, 0xa7, 0xb2, 0xa3, - 0x48, 0x12, 0x7e, 0x01, 0xf6, 0xa2, 0xaf, 0x3b, 0xe4, 0x79, 0xe2, 0x98, 
0x3a, 0x84, 0xf7, 0xb9, - 0x20, 0x5d, 0x7d, 0x5d, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, - 0x18, 0x51, 0x7b, 0x08, 0xf7, 0x4e, 0xdc, 0x9f, 0x8e, 0x78, 0x1b, 0x3b, 0xa3, 0x5b, 0xcb, 0x86, - 0x74, 0xf0, 0xee, 0x70, 0x60, 0x18, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x1a, 0xe8, 0xb8, - 0xc8, 0xcf, 0xa6, 0xf4, 0xf3, 0xc3, 0xb0, 0xe7, 0x14, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe2, - 0xe4, 0x77, 0x36, 0xd7, 0xb7, 0xe4, 0x2e, 0x7c, 0x6f, 0xca, 0x3a, 0xa4, 0x3e, 0xcd, 0x2d, 0x5d, - 0xa5, 0x71, 0x33, 0x65, 0xe0, 0x28, 0xa3, 0x0e, 0xaf, 0x00, 0xc4, 0xe9, 0xdf, 0x02, 0x5c, 0x87, - 0x33, 0x8f, 0x98, 0xcc, 0xbf, 0x84, 0x71, 0xa9, 0x65, 0x4c, 0x1c, 0xe5, 0xf8, 0x80, 0x8f, 0xc1, - 0x8e, 0x1a, 0x7d, 0xea, 0x72, 0x7c, 0x4e, 0x9a, 0x7d, 0xde, 0x16, 0x0e, 0xd7, 0xb7, 0x65, 0x7f, - 0xd3, 0x87, 0x03, 0x63, 0xe7, 0x30, 0xc7, 0x8e, 0x72, 0x59, 0xf0, 0x33, 0xb0, 0x79, 0xee, 0xb1, - 0x16, 0xb5, 0x6d, 0xe2, 0x46, 0x4a, 0x3b, 0x52, 0x69, 0x27, 0xcc, 0xc4, 0x71, 0xca, 0x86, 0x32, - 0x68, 0xc8, 0xc1, 0xae, 0x52, 0x6e, 0x30, 0xaf, 0x7d, 0xea, 0x05, 0xae, 0x08, 0x5b, 0x2a, 0xd7, - 0x77, 0xe3, 0x63, 0x64, 0xf7, 0x30, 0x0f, 0xf0, 0x72, 0x60, 0xdc, 0xce, 0x69, 0xe9, 0x09, 0x10, - 0xca, 0xd7, 0x86, 0x36, 0x00, 0xb2, 0x0f, 0x8c, 0xb6, 0xdc, 0xde, 0xcc, 0x4f, 0x40, 0x14, 0x83, - 0xd3, 0xbb, 0x6e, 0x3d, 0x3c, 0x99, 0xc7, 0x66, 0x34, 0xa1, 0x5b, 0xfd, 0x9b, 0x06, 0x6e, 0x16, - 0x32, 0xe1, 0x27, 0x89, 0xff, 0x0d, 0xd5, 0xd4, 0xff, 0x06, 0x98, 0x25, 0xbe, 0x81, 0xdf, 0x0d, - 0x5f, 0x6b, 0x40, 0x2f, 0xea, 0x9e, 0xf0, 0xe3, 0x44, 0x80, 0xef, 0xa4, 0x02, 0xdc, 0xca, 0xf0, - 0xde, 0x40, 0x7c, 0xff, 0xd0, 0xc0, 0x5e, 0xfe, 0xe9, 0x01, 0xef, 0x25, 0xa2, 0x33, 0x52, 0xd1, - 0x6d, 0xa4, 0x58, 0x2a, 0xb6, 0xdf, 0x83, 0x75, 0x75, 0xc6, 0x24, 0xff, 0x36, 0x25, 0x62, 0x0c, - 0x2b, 0x29, 0xbc, 0x1e, 0x2a, 0x89, 0x68, 0xa5, 0xe5, 0x87, 0x5d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, - 0xff, 0xd4, 0xc0, 0x3b, 0x33, 0x4f, 0x07, 0x68, 0x25, 0x42, 0x37, 0x53, 0xa1, 0x57, 0x8a, 0x05, - 0xde, 0xcc, 0x4f, 0x27, 0xeb, 0x83, 0xe7, 0x2f, 0x2a, 0x73, 0xdf, 0xbc, 0xa8, 0xcc, 0x7d, 0xfb, - 0xa2, 0x32, 0xf7, 0xa7, 0x61, 0x45, 0x7b, 0x3e, 0xac, 0x68, 0xdf, 0x0c, 0x2b, 0xda, 0xb7, 0xc3, - 0x8a, 0xf6, 0xbf, 0x61, 0x45, 0xfb, 0xeb, 0xff, 0x2b, 0x73, 0xbf, 0x5d, 0x52, 0x72, 0xdf, 0x07, - 0x00, 0x00, 0xff, 0xff, 0x15, 0x2e, 0xf4, 0x72, 0x59, 0x16, 0x00, 0x00, + 0x15, 0x5e, 0x7a, 0xff, 0xb4, 0xb3, 0x3f, 0xd6, 0xce, 0xfe, 0x84, 0x5e, 0xd4, 0xa2, 0xc3, 0x00, + 0x85, 0x9b, 0x26, 0x54, 0xbc, 0x76, 0x52, 0xa3, 0x69, 0x8b, 0x2c, 0x57, 0xbb, 0xf6, 0x06, 0xde, + 0xac, 0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, + 0x2a, 0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, + 0x02, 0x45, 0x91, 0xcb, 0xa0, 0x05, 0x84, 0x5a, 0x45, 0x5f, 0xc2, 0x57, 0x05, 0x47, 0x43, 0x4a, + 0xfc, 0x91, 0x64, 0x1b, 0xb0, 0xef, 0xc8, 0x39, 0xdf, 0xf7, 0x9d, 0xc3, 0x33, 0x67, 0xce, 0x0c, + 0x07, 0xd8, 0x97, 0xf7, 0xb9, 0x45, 0xfd, 0xea, 0x65, 0xd8, 0x24, 0xcc, 0x23, 0x82, 0xf0, 0x6a, + 0x97, 0x78, 0x8e, 0xcf, 0xaa, 0xca, 0x80, 0x03, 0x5a, 0x0d, 0x7c, 0x97, 0xb6, 0x7a, 0xd5, 0xee, + 0x9d, 0x26, 0x11, 0xf8, 0x4e, 0xb5, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x15, 0x30, 0x5f, 0xf8, + 0xf0, 0xc6, 0x10, 0x6a, 0xe1, 0x80, 0x5a, 0x43, 0xa8, 0xa5, 0xa0, 0x7b, 0x1f, 0xb6, 0xa9, 0xb8, + 0x08, 0x9b, 0x56, 0xcb, 0xef, 0x54, 0xdb, 0x7e, 0xdb, 0xaf, 0x4a, 0x46, 0x33, 0x3c, 0x97, 0x6f, + 0xf2, 0x45, 0x3e, 0x0d, 0x95, 0xf6, 0xcc, 0x31, 0xa7, 0x2d, 0x9f, 0x91, 0x6a, 0x37, 0xe7, 0x6d, + 0xef, 0xde, 0x08, 0xd3, 
0xc1, 0xad, 0x0b, 0xea, 0x11, 0xd6, 0xab, 0x06, 0x97, 0xed, 0x68, 0x80, + 0x57, 0x3b, 0x44, 0xe0, 0x22, 0x56, 0x75, 0x12, 0x8b, 0x85, 0x9e, 0xa0, 0x1d, 0x92, 0x23, 0x7c, + 0x32, 0x8b, 0xc0, 0x5b, 0x17, 0xa4, 0x83, 0x73, 0xbc, 0xbb, 0x93, 0x78, 0xa1, 0xa0, 0x6e, 0x95, + 0x7a, 0x82, 0x0b, 0x96, 0x25, 0x99, 0xf7, 0x40, 0xf9, 0xc0, 0x75, 0xfd, 0xaf, 0x89, 0x73, 0xd8, + 0x38, 0xa9, 0x31, 0xda, 0x25, 0x0c, 0xde, 0x02, 0x0b, 0x1e, 0xee, 0x10, 0x5d, 0xbb, 0xa5, 0xdd, + 0x5e, 0xb1, 0xd7, 0x9e, 0xf5, 0x8d, 0xb9, 0x41, 0xdf, 0x58, 0xf8, 0x02, 0x77, 0x08, 0x92, 0x16, + 0xf3, 0x53, 0xb0, 0xa9, 0x58, 0xc7, 0x2e, 0xb9, 0xfa, 0xd2, 0x77, 0xc3, 0x0e, 0x81, 0xdf, 0x07, + 0x4b, 0x8e, 0x14, 0x50, 0xc4, 0x0d, 0x45, 0x5c, 0x1a, 0xca, 0x22, 0x65, 0x35, 0x39, 0xb8, 0xae, + 0xc8, 0x0f, 0x7d, 0x2e, 0xea, 0x58, 0x5c, 0xc0, 0x7d, 0x00, 0x02, 0x2c, 0x2e, 0xea, 0x8c, 0x9c, + 0xd3, 0x2b, 0x45, 0x87, 0x8a, 0x0e, 0xea, 0x89, 0x05, 0x8d, 0xa1, 0xe0, 0x07, 0xa0, 0xc4, 0x08, + 0x76, 0xce, 0x3c, 0xb7, 0xa7, 0x5f, 0xbb, 0xa5, 0xdd, 0x2e, 0xd9, 0x65, 0xc5, 0x28, 0x21, 0x35, + 0x8e, 0x12, 0x84, 0xf9, 0x2f, 0x0d, 0x94, 0x8e, 0xba, 0xb4, 0x25, 0xa8, 0xef, 0xc1, 0xdf, 0x82, + 0x52, 0x34, 0x5b, 0x0e, 0x16, 0x58, 0x3a, 0x5b, 0xdd, 0xff, 0xc8, 0x1a, 0x55, 0x52, 0x92, 0x3c, + 0x2b, 0xb8, 0x6c, 0x47, 0x03, 0xdc, 0x8a, 0xd0, 0x56, 0xf7, 0x8e, 0x75, 0xd6, 0xfc, 0x8a, 0xb4, + 0xc4, 0x29, 0x11, 0x78, 0x14, 0xde, 0x68, 0x0c, 0x25, 0xaa, 0xd0, 0x05, 0xeb, 0x0e, 0x71, 0x89, + 0x20, 0x67, 0x41, 0xe4, 0x91, 0xcb, 0x08, 0x57, 0xf7, 0xef, 0xbe, 0x9c, 0x9b, 0xda, 0x38, 0xd5, + 0xde, 0x1c, 0xf4, 0x8d, 0xf5, 0xd4, 0x10, 0x4a, 0x8b, 0x9b, 0xdf, 0x68, 0x60, 0xf7, 0xb8, 0xf1, + 0x80, 0xf9, 0x61, 0xd0, 0x10, 0xd1, 0xec, 0xb6, 0x7b, 0xca, 0x04, 0x7f, 0x04, 0x16, 0x58, 0xe8, + 0xc6, 0x73, 0xf9, 0x5e, 0x3c, 0x97, 0x28, 0x74, 0xc9, 0x8b, 0xbe, 0xb1, 0x95, 0x61, 0x3d, 0xee, + 0x05, 0x04, 0x49, 0x02, 0xfc, 0x1c, 0x2c, 0x31, 0xec, 0xb5, 0x49, 0x14, 0xfa, 0xfc, 0xed, 0xd5, + 0x7d, 0xd3, 0x9a, 0xb8, 0xd6, 0xac, 0x93, 0x1a, 0x8a, 0xa0, 0xa3, 0x19, 0x97, 0xaf, 0x1c, 0x29, + 0x05, 0xf3, 0x14, 0xac, 0xcb, 0xa9, 0xf6, 0x99, 0x90, 0x16, 0x78, 0x13, 0xcc, 0x77, 0xa8, 0x27, + 0x83, 0x5a, 0xb4, 0x57, 0x15, 0x6b, 0xfe, 0x94, 0x7a, 0x28, 0x1a, 0x97, 0x66, 0x7c, 0x25, 0x73, + 0x36, 0x6e, 0xc6, 0x57, 0x28, 0x1a, 0x37, 0x1f, 0x80, 0x65, 0xe5, 0x71, 0x5c, 0x68, 0x7e, 0xba, + 0xd0, 0x7c, 0x81, 0xd0, 0x5f, 0xae, 0x81, 0xad, 0xba, 0xef, 0xd4, 0x28, 0x67, 0xa1, 0xcc, 0x97, + 0x1d, 0x3a, 0x6d, 0x22, 0xde, 0x42, 0x7d, 0x3c, 0x06, 0x0b, 0x3c, 0x20, 0x2d, 0x55, 0x16, 0xfb, + 0x53, 0x72, 0x5b, 0x10, 0x5f, 0x23, 0x20, 0xad, 0xd1, 0xb2, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x4f, + 0xc1, 0x12, 0x17, 0x58, 0x84, 0x5c, 0x9f, 0x97, 0xba, 0xf7, 0x5e, 0x51, 0x57, 0x72, 0x47, 0xb3, + 0x38, 0x7c, 0x47, 0x4a, 0xd3, 0xfc, 0x87, 0x06, 0xde, 0x29, 0x60, 0x3d, 0xa2, 0x5c, 0xc0, 0xa7, + 0xb9, 0x8c, 0x59, 0x2f, 0x97, 0xb1, 0x88, 0x2d, 0xf3, 0x95, 0x2c, 0xde, 0x78, 0x64, 0x2c, 0x5b, + 0x0d, 0xb0, 0x48, 0x05, 0xe9, 0xc4, 0xa5, 0x68, 0xbd, 0xda, 0x67, 0xd9, 0xeb, 0x4a, 0x7a, 0xf1, + 0x24, 0x12, 0x41, 0x43, 0x2d, 0xf3, 0x9f, 0xd7, 0x0a, 0x3f, 0x27, 0x4a, 0x27, 0x3c, 0x07, 0x6b, + 0x1d, 0xea, 0x1d, 0x74, 0x31, 0x75, 0x71, 0x53, 0xad, 0x9e, 0x69, 0x45, 0x10, 0x75, 0x58, 0x6b, + 0xd8, 0x61, 0xad, 0x13, 0x4f, 0x9c, 0xb1, 0x86, 0x60, 0xd4, 0x6b, 0xdb, 0xe5, 0x41, 0xdf, 0x58, + 0x3b, 0x1d, 0x53, 0x42, 0x29, 0x5d, 0xf8, 0x6b, 0x50, 0xe2, 0xc4, 0x25, 0x2d, 0xe1, 0xb3, 0x57, + 0xeb, 0x10, 0x8f, 0x70, 0x93, 0xb8, 0x0d, 0x45, 0xb5, 0xd7, 0xa2, 0xbc, 0xc5, 0x6f, 0x28, 0x91, + 0x84, 0x2e, 0xd8, 0xe8, 0xe0, 0xab, 0x27, 0x1e, 
0x4e, 0x3e, 0x64, 0xfe, 0x35, 0x3f, 0x04, 0x0e, + 0xfa, 0xc6, 0xc6, 0x69, 0x4a, 0x0b, 0x65, 0xb4, 0xcd, 0xff, 0x2d, 0x80, 0x1b, 0x13, 0xab, 0x0a, + 0x7e, 0x0e, 0xa0, 0xdf, 0xe4, 0x84, 0x75, 0x89, 0xf3, 0x60, 0xb8, 0x07, 0x51, 0x3f, 0x5e, 0xb8, + 0x7b, 0x6a, 0x82, 0xe0, 0x59, 0x0e, 0x81, 0x0a, 0x58, 0xf0, 0x0f, 0x1a, 0x58, 0x77, 0x86, 0x6e, + 0x88, 0x53, 0xf7, 0x9d, 0xb8, 0x30, 0x1e, 0xbc, 0x4e, 0xbd, 0x5b, 0xb5, 0x71, 0xa5, 0x23, 0x4f, + 0xb0, 0x9e, 0xbd, 0xa3, 0x02, 0x5a, 0x4f, 0xd9, 0x50, 0xda, 0x29, 0x3c, 0x05, 0xd0, 0x49, 0x24, + 0xb9, 0xda, 0xd3, 0x64, 0x8a, 0x17, 0xed, 0x9b, 0x4a, 0x61, 0x27, 0xe5, 0x37, 0x06, 0xa1, 0x02, + 0x22, 0xfc, 0x19, 0xd8, 0x68, 0x85, 0x8c, 0x11, 0x4f, 0x3c, 0x24, 0xd8, 0x15, 0x17, 0x3d, 0x7d, + 0x41, 0x4a, 0xed, 0x2a, 0xa9, 0x8d, 0xc3, 0x94, 0x15, 0x65, 0xd0, 0x11, 0xdf, 0x21, 0x9c, 0x32, + 0xe2, 0xc4, 0xfc, 0xc5, 0x34, 0xbf, 0x96, 0xb2, 0xa2, 0x0c, 0x1a, 0xde, 0x07, 0x6b, 0xe4, 0x2a, + 0x20, 0xad, 0x38, 0xa7, 0x4b, 0x92, 0xbd, 0xad, 0xd8, 0x6b, 0x47, 0x63, 0x36, 0x94, 0x42, 0xee, + 0xb9, 0x00, 0xe6, 0x93, 0x08, 0xcb, 0x60, 0xfe, 0x92, 0xf4, 0x86, 0x3b, 0x0f, 0x8a, 0x1e, 0xe1, + 0x67, 0x60, 0xb1, 0x8b, 0xdd, 0x90, 0xa8, 0x5a, 0x7f, 0xff, 0xe5, 0x6a, 0xfd, 0x31, 0xed, 0x10, + 0x34, 0x24, 0xfe, 0xf8, 0xda, 0x7d, 0xcd, 0xfc, 0xbb, 0x06, 0x36, 0xeb, 0xbe, 0xd3, 0x20, 0xad, + 0x90, 0x51, 0xd1, 0xab, 0xcb, 0x79, 0x7e, 0x0b, 0x3d, 0x1b, 0xa5, 0x7a, 0xf6, 0x47, 0xd3, 0x6b, + 0x2d, 0x1d, 0xdd, 0xa4, 0x8e, 0x6d, 0x3e, 0xd3, 0xc0, 0x4e, 0x0e, 0xfd, 0x16, 0x3a, 0xea, 0xcf, + 0xd3, 0x1d, 0xf5, 0x83, 0x57, 0xf9, 0x98, 0x09, 0xfd, 0xf4, 0xdf, 0xe5, 0x82, 0x4f, 0x91, 0xdd, + 0x34, 0x3a, 0xdd, 0x31, 0xda, 0xa5, 0x2e, 0x69, 0x13, 0x47, 0x7e, 0x4c, 0x69, 0xec, 0x74, 0x97, + 0x58, 0xd0, 0x18, 0x0a, 0x72, 0xb0, 0xeb, 0x90, 0x73, 0x1c, 0xba, 0xe2, 0xc0, 0x71, 0x0e, 0x71, + 0x80, 0x9b, 0xd4, 0xa5, 0x82, 0xaa, 0xe3, 0xc8, 0x8a, 0xfd, 0xe9, 0xa0, 0x6f, 0xec, 0xd6, 0x0a, + 0x11, 0x2f, 0xfa, 0xc6, 0xcd, 0xfc, 0x69, 0xde, 0x4a, 0x20, 0x3d, 0x34, 0x41, 0x1a, 0xf6, 0x80, + 0xce, 0xc8, 0xef, 0xc2, 0x68, 0x51, 0xd4, 0x98, 0x1f, 0xa4, 0xdc, 0xce, 0x4b, 0xb7, 0x3f, 0x1d, + 0xf4, 0x0d, 0x1d, 0x4d, 0xc0, 0xcc, 0x76, 0x3c, 0x51, 0x1e, 0x7e, 0x05, 0xb6, 0xb0, 0x3a, 0x87, + 0x8f, 0x7b, 0x5d, 0x90, 0x5e, 0xef, 0x0f, 0xfa, 0xc6, 0xd6, 0x41, 0xde, 0x3c, 0xdb, 0x61, 0x91, + 0x28, 0xac, 0x82, 0xe5, 0xae, 0x3c, 0xb2, 0x73, 0x7d, 0x51, 0xea, 0xef, 0x0c, 0xfa, 0xc6, 0xf2, + 0xf0, 0x14, 0x1f, 0x69, 0x2e, 0x1d, 0x37, 0xe4, 0x41, 0x30, 0x46, 0xc1, 0x8f, 0xc1, 0xea, 0x85, + 0xcf, 0xc5, 0x17, 0x44, 0x7c, 0xed, 0xb3, 0x4b, 0xd9, 0x18, 0x4a, 0xf6, 0x96, 0x9a, 0xc1, 0xd5, + 0x87, 0x23, 0x13, 0x1a, 0xc7, 0xc1, 0x5f, 0x82, 0x95, 0x0b, 0x75, 0xec, 0xe3, 0xfa, 0xb2, 0x2c, + 0xb4, 0xdb, 0x53, 0x0a, 0x2d, 0x75, 0x44, 0xb4, 0x37, 0x95, 0xfc, 0x4a, 0x3c, 0xcc, 0xd1, 0x48, + 0x0d, 0xfe, 0x00, 0x2c, 0xcb, 0x97, 0x93, 0x9a, 0x5e, 0x92, 0xd1, 0x5c, 0x57, 0xf0, 0xe5, 0x87, + 0xc3, 0x61, 0x14, 0xdb, 0x63, 0xe8, 0x49, 0xfd, 0x50, 0x5f, 0xc9, 0x43, 0x4f, 0xea, 0x87, 0x28, + 0xb6, 0xc3, 0xa7, 0x60, 0x99, 0x93, 0x47, 0xd4, 0x0b, 0xaf, 0x74, 0x20, 0x97, 0xdc, 0x9d, 0x29, + 0xe1, 0x36, 0x8e, 0x24, 0x32, 0x73, 0xe0, 0x1e, 0xa9, 0x2b, 0x3b, 0x8a, 0x25, 0xa1, 0x03, 0x56, + 0x58, 0xe8, 0x1d, 0xf0, 0x27, 0x9c, 0x30, 0x7d, 0x35, 0xb7, 0xdb, 0x67, 0xf5, 0x51, 0x8c, 0xcd, + 0x7a, 0x48, 0x32, 0x93, 0x20, 0xd0, 0x48, 0x18, 0xfe, 0x51, 0x03, 0x90, 0x87, 0x41, 0xe0, 0x92, + 0x0e, 0xf1, 0x04, 0x76, 0xe5, 0xf9, 0x9e, 0xeb, 0x6b, 0xd2, 0xdf, 0x4f, 0xa6, 0x7d, 0x4f, 0x8e, + 0x94, 0x75, 0x9c, 0x6c, 0xd3, 0x79, 0x28, 0x2a, 0xf0, 0x19, 0xa5, 0xf3, 
0x9c, 0xcb, 0x67, 0x7d, + 0x7d, 0x66, 0x3a, 0x8b, 0xff, 0x5f, 0x46, 0xe9, 0x54, 0x76, 0x14, 0x4b, 0xc2, 0x2f, 0xc1, 0x6e, + 0xfc, 0x77, 0x87, 0x7c, 0x5f, 0x1c, 0x53, 0x97, 0xf0, 0x1e, 0x17, 0xa4, 0xa3, 0x6f, 0xc8, 0x69, + 0xae, 0x28, 0xe6, 0x2e, 0x2a, 0x44, 0xa1, 0x09, 0x6c, 0xd8, 0x01, 0x46, 0xdc, 0x1e, 0xa2, 0xb5, + 0x93, 0xf4, 0xa7, 0x23, 0xde, 0xc2, 0xee, 0xf0, 0xd4, 0x72, 0x5d, 0x3a, 0x78, 0x6f, 0xd0, 0x37, + 0x8c, 0xda, 0x74, 0x28, 0x9a, 0xa5, 0x05, 0x7f, 0x01, 0x74, 0x3c, 0xc9, 0x4f, 0x59, 0xfa, 0xf9, + 0x5e, 0xd4, 0x73, 0x26, 0x3a, 0x98, 0xc8, 0x86, 0x01, 0x28, 0xe3, 0xf4, 0x7f, 0x36, 0xd7, 0x37, + 0xe5, 0x2a, 0x7c, 0x7f, 0xca, 0x3c, 0x64, 0x7e, 0xcd, 0x6d, 0x5d, 0xa5, 0xb1, 0x9c, 0x31, 0x70, + 0x94, 0x53, 0x87, 0x57, 0x00, 0xe2, 0xec, 0xb5, 0x00, 0xd7, 0xe1, 0xcc, 0x2d, 0x26, 0x77, 0x97, + 0x30, 0x2a, 0xb5, 0x9c, 0x89, 0xa3, 0x02, 0x1f, 0xf0, 0x11, 0xd8, 0x56, 0xa3, 0x4f, 0x3c, 0x8e, + 0xcf, 0x49, 0xa3, 0xc7, 0x5b, 0xc2, 0xe5, 0xfa, 0x96, 0xec, 0x6f, 0xfa, 0xa0, 0x6f, 0x6c, 0x1f, + 0x14, 0xd8, 0x51, 0x21, 0x0b, 0x7e, 0x06, 0xca, 0xe7, 0x3e, 0x6b, 0x52, 0xc7, 0x21, 0x5e, 0xac, + 0xb4, 0x2d, 0x95, 0xb6, 0xa3, 0x4c, 0x1c, 0x67, 0x6c, 0x28, 0x87, 0x86, 0x1c, 0xec, 0x28, 0xe5, + 0x3a, 0xf3, 0x5b, 0xa7, 0x7e, 0xe8, 0x89, 0xa8, 0xa5, 0x72, 0x7d, 0x27, 0xd9, 0x46, 0x76, 0x0e, + 0x8a, 0x00, 0x2f, 0xfa, 0xc6, 0xad, 0x82, 0x96, 0x9e, 0x02, 0xa1, 0x62, 0x6d, 0xe8, 0x00, 0x20, + 0xfb, 0xc0, 0x70, 0xc9, 0xed, 0xce, 0xfc, 0x05, 0x44, 0x09, 0x38, 0xbb, 0xea, 0x36, 0xa2, 0x9d, + 0x79, 0x64, 0x46, 0x63, 0xba, 0x50, 0x80, 0x4d, 0x9c, 0xb9, 0x31, 0xe2, 0xfa, 0x3b, 0x72, 0x8e, + 0x7f, 0x38, 0x7b, 0x8e, 0x13, 0x8e, 0x7d, 0x43, 0x4d, 0xf1, 0x66, 0xd6, 0xc2, 0x51, 0xde, 0x81, + 0xf9, 0x67, 0x0d, 0xdc, 0x98, 0x18, 0x2f, 0xfc, 0x24, 0x75, 0xcb, 0x61, 0x66, 0x6e, 0x39, 0x60, + 0x9e, 0xf8, 0x06, 0x2e, 0x39, 0xbe, 0xd1, 0x80, 0x3e, 0xa9, 0x67, 0xc3, 0x8f, 0x53, 0x01, 0xbe, + 0x9b, 0x09, 0x70, 0x33, 0xc7, 0x7b, 0x03, 0xf1, 0xfd, 0x55, 0x03, 0xbb, 0xc5, 0x7b, 0x16, 0xbc, + 0x9b, 0x8a, 0xce, 0xc8, 0x44, 0x77, 0x3d, 0xc3, 0x52, 0xb1, 0xfd, 0x06, 0x6c, 0xa8, 0x9d, 0x2d, + 0x7d, 0xc7, 0x95, 0x8a, 0x31, 0xaa, 0xdf, 0xe8, 0x50, 0xaa, 0x24, 0xe2, 0xfa, 0x92, 0xbf, 0x93, + 0xe9, 0x31, 0x94, 0x51, 0x33, 0xff, 0xa6, 0x81, 0x77, 0x67, 0xee, 0x49, 0xd0, 0x4e, 0x85, 0x6e, + 0x65, 0x42, 0xaf, 0x4c, 0x16, 0x78, 0x33, 0x57, 0x5d, 0xf6, 0x87, 0xcf, 0x9e, 0x57, 0xe6, 0xbe, + 0x7d, 0x5e, 0x99, 0xfb, 0xee, 0x79, 0x65, 0xee, 0xf7, 0x83, 0x8a, 0xf6, 0x6c, 0x50, 0xd1, 0xbe, + 0x1d, 0x54, 0xb4, 0xef, 0x06, 0x15, 0xed, 0x3f, 0x83, 0x8a, 0xf6, 0xa7, 0xff, 0x56, 0xe6, 0x7e, + 0xb5, 0xac, 0xe4, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf9, 0x85, 0x0c, 0x05, 0x17, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index e9df3c16fe..738c82c67b 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -30,6 +30,12 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. message AllowedFlexVolume { // driver is the name of the Flexvolume driver. 
@@ -292,6 +298,11 @@ message PodSecurityPolicySpec { // +optional repeated AllowedFlexVolume allowedFlexVolumes = 18; + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value means no CSI drivers can run inline within a pod spec. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index 91ea118587..74ddd06932 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -216,6 +216,10 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value means no CSI drivers can run inline within a pod spec. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -304,6 +308,12 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 547ef18ea4..324116d5d9 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,15 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", "driver": "driver is the name of the Flexvolume driver.", @@ -170,6 +179,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. 
Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value means no CSI drivers can run inline within a pod spec.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 1a02ae6007..24b078eda1 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -27,6 +27,22 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { + if in == nil { + return nil + } + out := new(AllowedCSIDriver) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in @@ -375,6 +391,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 76899ef096..80f43ce922 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 4b321a7c24..71fa08341c 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -43,6 +43,7 @@ message ClusterRole { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole + // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -121,6 +122,7 @@ message Role { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role + // +optional repeated PolicyRule rules = 2; } diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 17163cbb26..7ba7d05435 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -108,6 +108,7 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -170,6 +171,7 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index f2547a58f7..918b8a337c 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index cde3aaac98..b16715bc49 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -43,6 +43,7 @@ message ClusterRole { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole + // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
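The RBAC hunks here only mark rules as +optional, which is mainly relevant for aggregated ClusterRoles whose rules are controller-managed. A minimal sketch under that assumption (not part of the patch; the role name and aggregation label are hypothetical):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cr := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"},
		// Rules is deliberately left nil: with an aggregation rule set, the
		// RBAC controller fills Rules in and overwrites direct edits.
		AggregationRule: &rbacv1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"rbac.example.com/aggregate-to-monitoring": "true"}},
			},
		},
	}
	fmt.Printf("%+v\n", cr)
}

With the aggregation rule set, leaving Rules empty is the expected shape of the object, which is what the new +optional marker reflects.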
@@ -122,6 +123,7 @@ message Role { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role + // +optional repeated PolicyRule rules = 2; } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 398d6a169c..521cce4f31 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -110,6 +110,7 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -172,6 +173,7 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index 516625eeea..fe7aae975a 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 27bd30ce9f..07bf735a06 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -43,6 +43,7 @@ message ClusterRole { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole + // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -122,6 +123,7 @@ message Role { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role + // +optional repeated PolicyRule rules = 2; } diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 857b67a6f8..35843c90d1 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -109,6 +109,7 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -171,6 +172,7 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go new file mode 100644 index 0000000000..76c4da002e --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=scheduling.k8s.io + +package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go new file mode 100644 index 0000000000..5adf978ef8 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -0,0 +1,621 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto + + It has these top-level messages: + PriorityClass + PriorityClassList +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
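// --- Editor's sketch, not part of the generated file ---
// The helpers further down (encodeVarintGenerated, sovGenerated) hand-roll
// protobuf varints: 7 payload bits per byte, least-significant group first,
// with the high bit set on every byte except the last. A standalone,
// hypothetical equivalent of the same idea, for illustration only:

func putUvarintSketch(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v) // final byte, continuation bit clear
	return offset + 1
}

func uvarintLenSketch(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// For example, putUvarintSketch(buf, 0, 300) writes 0xac 0x02 and returns 2.
// --- end sketch ---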
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") +} +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + dAtA[i] = 0x18 + i++ + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PriorityClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PriorityClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + 
fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 442 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x3f, 0x8b, 0xd4, 0x40, + 0x18, 0xc6, 0x33, 0x7b, 0x2e, 0xac, 0xb3, 0x2c, 0x68, 0x44, 0x08, 0x5b, 0xcc, 0x85, 0xb3, 0x30, + 0x8d, 0x33, 0xee, 0xa1, 0x22, 0x58, 0x19, 0x0f, 0x44, 0x38, 0x51, 0x52, 0x58, 0x88, 0x85, 0x93, + 0xe4, 0xbd, 0xec, 0xb8, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xd7, 0x59, 0x5b, 0xf9, 0x8d, 0x6c, 0xb7, + 0xbc, 0xf2, 0xaa, 0xc3, 0x8d, 0x5f, 0x44, 0xf2, 0xc7, 0xcb, 0xae, 0xe7, 0xe1, 0x75, 0x99, 0xe7, + 0x7d, 0x7e, 0xcf, 0x3b, 0x79, 0x18, 0xfc, 0x72, 0xf5, 0x5c, 0x53, 0x21, 0xd9, 0xaa, 0x0c, 0x41, + 0xe5, 0x60, 0x40, 0xb3, 0x0a, 0xf2, 0x58, 0x2a, 0xd6, 0x0f, 0x78, 0x21, 0x98, 0x8e, 0x96, 0x10, + 0x97, 0xa9, 0xc8, 0x13, 0x56, 0x2d, 0x58, 0x02, 0x39, 0x28, 0x6e, 0x20, 0xa6, 0x85, 0x92, 0x46, + 0xda, 0x4e, 0xe7, 0xa4, 0xbc, 0x10, 0x74, 0x70, 0xd2, 0x6a, 0x31, 0x7f, 0x94, 0x08, 0xb3, 0x2c, + 0x43, 0x1a, 0xc9, 0x8c, 0x25, 0x32, 0x91, 0xac, 0x05, 0xc2, 0xf2, 0xa4, 0x3d, 0xb5, 0x87, 0xf6, + 0xab, 0x0b, 0x9a, 0x3f, 0x19, 0x56, 0x66, 0x3c, 0x5a, 0x8a, 0x1c, 0xd4, 0x29, 0x2b, 0x56, 0x49, + 0x23, 0x68, 0x96, 0x81, 0xe1, 0xff, 0x58, 0x3f, 0x67, 0xd7, 0x51, 0xaa, 0xcc, 0x8d, 0xc8, 0xe0, + 0x0a, 0xf0, 0xec, 0x7f, 0x40, 0xf3, 0x13, 0x19, 0xff, 0x9b, 0x3b, 0xf8, 0x36, 0xc2, 0xb3, 0xf7, + 0x4a, 0x48, 0x25, 0xcc, 0xe9, 0xab, 0x94, 0x6b, 0x6d, 0x7f, 0xc6, 0x93, 0xe6, 0x56, 0x31, 0x37, + 0xdc, 0x41, 0x2e, 0xf2, 0xa6, 0x87, 0x8f, 0xe9, 0x50, 0xc6, 0x65, 0x38, 0x2d, 0x56, 0x49, 0x23, + 0x68, 0xda, 0xb8, 0x69, 0xb5, 0xa0, 0xef, 0xc2, 0x2f, 0x10, 0x99, 0xb7, 0x60, 0xb8, 0x6f, 0xaf, + 0x2f, 0xf6, 0xad, 0xfa, 0x62, 0x1f, 0x0f, 0x5a, 0x70, 0x99, 0x6a, 0x3f, 0xc0, 0xe3, 0x8a, 0xa7, + 0x25, 0x38, 0x23, 0x17, 0x79, 0x63, 0x7f, 0xd6, 0x9b, 0xc7, 0x1f, 0x1a, 0x31, 0xe8, 0x66, 0xf6, + 0x0b, 0x3c, 0x4b, 0x52, 0x19, 0xf2, 0xf4, 0x08, 0x4e, 0x78, 0x99, 0x1a, 0x67, 0xcf, 0x45, 0xde, + 0xc4, 0xbf, 0xdf, 0x9b, 0x67, 0xaf, 0xb7, 0x87, 0xc1, 0xae, 0xd7, 0x7e, 0x8a, 0xa7, 0x31, 0xe8, + 0x48, 0x89, 0xc2, 0x08, 0x99, 0x3b, 0xb7, 0x5c, 0xe4, 0xdd, 0xf6, 0xef, 0xf5, 0xe8, 0xf4, 0x68, + 0x18, 0x05, 0xdb, 0xbe, 0x83, 0x1f, 0x08, 0xdf, 0xdd, 0x29, 0xe3, 0x58, 0x68, 0x63, 0x7f, 0xba, + 0x52, 0x08, 0xbd, 0x59, 0x21, 0x0d, 0xdd, 0xd6, 0x71, 0xa7, 0xdf, 0x3c, 0xf9, 0xa3, 0x6c, 0x95, + 0x71, 0x8c, 0xc7, 0xc2, 0x40, 0xa6, 0x9d, 0x91, 0xbb, 0xe7, 
0x4d, 0x0f, 0x1f, 0xd2, 0xeb, 0x1e, + 0x1e, 0xdd, 0xb9, 0xd9, 0xd0, 0xda, 0x9b, 0x86, 0x0e, 0xba, 0x10, 0xdf, 0x5b, 0x6f, 0x88, 0x75, + 0xb6, 0x21, 0xd6, 0xf9, 0x86, 0x58, 0x5f, 0x6b, 0x82, 0xd6, 0x35, 0x41, 0x67, 0x35, 0x41, 0xe7, + 0x35, 0x41, 0x3f, 0x6b, 0x82, 0xbe, 0xff, 0x22, 0xd6, 0xc7, 0x51, 0xb5, 0xf8, 0x1d, 0x00, 0x00, + 0xff, 0xff, 0x32, 0xe8, 0x23, 0x88, 0x24, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto new file mode 100644 index 0000000000..791ba8dc79 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; +} + +// PriorityClassList is a collection of priority classes. +message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1/register.go b/vendor/k8s.io/api/scheduling/v1/register.go new file mode 100644 index 0000000000..33977fe9a7 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + + // SchemeBuilder is a collection of functions that add things to a scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go new file mode 100644 index 0000000000..d33e0085aa --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. 
+ Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..6f3999a915 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -0,0 +1,52 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..09bfc3775f --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index 05a454a529..cff47e1f4a 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 5fb5472116..bfd85f5598 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -28,6 +28,7 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. message PriorityClass { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 21e3df0af9..6103ea4e7e 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -24,6 +24,7 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index f406f4402e..89565012f9 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go index 7cf1af2124..e661968980 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 0a95755032..3f15dc1d5c 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -28,11 +28,12 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -57,7 +58,7 @@ message PriorityClass { // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index a9aaa86650..2f6b3c9683 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -24,12 +24,13 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -57,7 +58,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index c18f54a82c..e99ed8fc48 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -28,8 +28,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", @@ -41,7 +41,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 9126211d64..60066bb6db 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto index d5534c4de9..db1ec9312b 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -42,7 +42,7 @@ message PodPreset { // PodPresetList is a list of PodPreset objects. message PodPresetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go index 506aacf4a9..8cc99d440d 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types.go @@ -61,7 +61,7 @@ type PodPresetSpec struct { type PodPresetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go index 508c452f19..0501e0af35 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (PodPreset) SwaggerDoc() map[string]string { var map_PodPresetList = map[string]string{ "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index ff8bb34ca1..75a6489da2 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -15,7 +15,8 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true -package v1 +package v1 // import "k8s.io/api/storage/v1" diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 668c854474..7ac6cb2d2a 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -178,7 +178,7 @@ message VolumeError { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. 
// +optional optional string message = 2; diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 9f2f67b6b7..bd60e1026b 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -204,7 +204,7 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index d4a022d52e..e31dd7f712 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -109,7 +109,7 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go index 0056b00d97..6f7ad7e732 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index ea7667dda3..e3e3626e24 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index 3e995f039f..0cde6ec087 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -24,6 +24,13 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto It has these top-level messages: + CSIDriver + CSIDriverList + CSIDriverSpec + CSINode + CSINodeDriver + CSINodeList + CSINodeSpec StorageClass StorageClassList VolumeAttachment @@ -59,39 +66,74 @@ var _ = math.Inf // proto package needs to be updated. 
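// --- Editor's note, not part of the generated file ---
// The hand-written MarshalTo bodies in this file emit protobuf key bytes as
// literals: key = (fieldNumber << 3) | wireType, so field 1 length-delimited
// is 0x0a, field 1 varint is 0x8, field 2 varint is 0x10, and field 2
// length-delimited is 0x12. A hypothetical helper that reproduces those
// constants, for illustration only:

func protoKeySketch(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

// protoKeySketch(1, 2) == 0x0a, protoKeySketch(1, 0) == 0x8,
// protoKeySketch(2, 0) == 0x10, protoKeySketch(2, 2) == 0x12.
// --- end note ---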
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + func (m *StorageClass) Reset() { *m = StorageClass{} } func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *StorageClassList) Reset() { *m = StorageClassList{} } func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func 
(*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func init() { + proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") + proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList") + proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1beta1.CSIDriverSpec") + proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1beta1.CSINode") + proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1beta1.CSINodeDriver") + proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") + proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") @@ -101,7 +143,7 @@ func init() { proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") } -func (m *StorageClass) Marshal() (dAtA []byte, err error) { +func (m *CSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -111,7 +153,7 @@ func (m *StorageClass) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { +func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -126,6 +168,259 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { i += n1 dAtA[i] = 0x12 i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AttachRequired != nil { + dAtA[i] = 0x8 + i++ + if *m.AttachRequired { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.PodInfoOnMount != nil { + dAtA[i] = 0x10 + i++ + if *m.PodInfoOnMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *CSINode) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *CSINodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Drivers) > 0 { + for _, msg := range m.Drivers { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x12 + i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) i += copy(dAtA[i:], m.Provisioner) if len(m.Parameters) > 0 { @@ -220,11 +515,11 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := 
m.ListMeta.MarshalTo(dAtA[i:]) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -258,27 +553,27 @@ func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n9 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) + n10, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n10 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) + n11, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n11 return i, nil } @@ -300,11 +595,11 @@ func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n12, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n12 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -366,11 +661,11 @@ func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n7, err := m.Source.MarshalTo(dAtA[i:]) + n13, err := m.Source.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n13 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) @@ -427,21 +722,21 @@ func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n8, err := m.AttachError.MarshalTo(dAtA[i:]) + n14, err := m.AttachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n14 } if m.DetachError != nil { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n9, err := m.DetachError.MarshalTo(dAtA[i:]) + n15, err := m.DetachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n15 } return i, nil } @@ -464,11 +759,11 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n10, err := m.Time.MarshalTo(dAtA[i:]) + n16, err := m.Time.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n16 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -485,26 +780,114 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *StorageClass) Size() (n int) { +func (m *CSIDriver) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) + l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.ReclaimPolicy != nil { - l = len(*m.ReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.MountOptions) > 0 { + return n +} + +func (m *CSIDriverList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSIDriverSpec) Size() (n int) { + var l int + _ = l + if m.AttachRequired != nil { + n += 2 + } + if m.PodInfoOnMount != nil { + n += 2 + } + return n +} + +func (m *CSINode) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSINodeDriver) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeSpec) Size() (n int) { + var l int + _ = l + if len(m.Drivers) > 0 { + for _, e := range m.Drivers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { for _, s := range m.MountOptions { l = len(s) n += 1 + l + sovGenerated(uint64(l)) @@ -634,6 +1017,83 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *CSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriver{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriverList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriverSpec{`, + `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, + `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `}`, + }, "") + return s +} +func (this *CSINode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINode{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CSINode", "CSINode", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeSpec{`, + `Drivers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Drivers), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *StorageClass) String() string { if this == nil { return "nil" @@ -759,6 +1219,758 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *CSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSIDriver{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b 
:= bool(v != 0) + m.AttachRequired = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PodInfoOnMount = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeDriver: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSINode{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drivers = append(m.Drivers, CSINodeDriver{}) + if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StorageClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2180,67 +3392,81 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 988 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x4d, 0x86, 0x08, 0x8c, 0x0f, 0x76, 0xe4, 0x0b, 0xa6, - 0x6a, 0x77, 0x9b, 0xa8, 0xa0, 0x08, 0x89, 0x83, 0xb7, 0xe4, 0x00, 0x8a, 0xdb, 0x30, 0x89, 0x2a, - 0x54, 0x71, 0x60, 0xb2, 0xfb, 0x76, 0xb3, 0x78, 0x77, 0x67, 0x99, 0x19, 0x1b, 0x72, 0xe3, 0xc4, - 0x19, 0x71, 0xe0, 0x17, 0xf0, 0x3f, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, 0x8b, 0x2c, 0xff, 0x22, - 0xe2, 0x80, 
0x66, 0x76, 0x62, 0xaf, 0xbd, 0x0e, 0x6d, 0x7a, 0xe8, 0xcd, 0xef, 0xc7, 0xf3, 0xbc, - 0xdf, 0xb3, 0x46, 0x8f, 0xfa, 0xfb, 0xc2, 0x0e, 0x99, 0xd3, 0x1f, 0x9c, 0x02, 0x4f, 0x40, 0x82, - 0x70, 0x86, 0x90, 0xf8, 0x8c, 0x3b, 0xc6, 0x40, 0xd3, 0xd0, 0x11, 0x92, 0x71, 0x1a, 0x80, 0x33, - 0xdc, 0x3d, 0x05, 0x49, 0x77, 0x9d, 0x00, 0x12, 0xe0, 0x54, 0x82, 0x6f, 0xa7, 0x9c, 0x49, 0x86, - 0x1b, 0xb9, 0xaf, 0x4d, 0xd3, 0xd0, 0x36, 0xbe, 0xb6, 0xf1, 0x6d, 0xdc, 0x0f, 0x42, 0x79, 0x36, - 0x38, 0xb5, 0x3d, 0x16, 0x3b, 0x01, 0x0b, 0x98, 0xa3, 0x21, 0xa7, 0x83, 0xe7, 0x5a, 0xd2, 0x82, - 0xfe, 0x95, 0x53, 0x35, 0xda, 0x85, 0xb0, 0x1e, 0xe3, 0x2a, 0xe6, 0x6c, 0xb8, 0xc6, 0xc3, 0x89, - 0x4f, 0x4c, 0xbd, 0xb3, 0x30, 0x01, 0x7e, 0xee, 0xa4, 0xfd, 0x40, 0x29, 0x84, 0x13, 0x83, 0xa4, - 0xf3, 0x50, 0xce, 0x4d, 0x28, 0x3e, 0x48, 0x64, 0x18, 0x43, 0x09, 0xf0, 0xc9, 0xab, 0x00, 0xc2, - 0x3b, 0x83, 0x98, 0xce, 0xe2, 0xda, 0xbf, 0xae, 0xa0, 0xf5, 0xe3, 0xbc, 0x0b, 0x8f, 0x22, 0x2a, - 0x04, 0xfe, 0x16, 0x55, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x6e, 0xed, 0x58, 0x9d, 0xda, 0xde, 0x03, - 0x7b, 0xd2, 0xb1, 0x31, 0xb7, 0x9d, 0xf6, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x87, 0xbb, 0xf6, - 0x93, 0xd3, 0xef, 0xc0, 0x93, 0x3d, 0x90, 0xd4, 0xc5, 0x17, 0xa3, 0xd6, 0x42, 0x36, 0x6a, 0xa1, - 0x89, 0x8e, 0x8c, 0x59, 0xf1, 0xc7, 0xa8, 0x96, 0x72, 0x36, 0x0c, 0x45, 0xc8, 0x12, 0xe0, 0xf5, - 0xc5, 0x1d, 0xab, 0xb3, 0xe6, 0xbe, 0x6b, 0x20, 0xb5, 0xa3, 0x89, 0x89, 0x14, 0xfd, 0x70, 0x84, - 0x50, 0x4a, 0x39, 0x8d, 0x41, 0x02, 0x17, 0xf5, 0xca, 0x4e, 0xa5, 0x53, 0xdb, 0xdb, 0xb7, 0x6f, - 0x1e, 0xa6, 0x5d, 0x2c, 0xcb, 0x3e, 0x1a, 0x43, 0x0f, 0x12, 0xc9, 0xcf, 0x27, 0x29, 0x4e, 0x0c, - 0xa4, 0xc0, 0x8f, 0xfb, 0x68, 0x83, 0x83, 0x17, 0xd1, 0x30, 0x3e, 0x62, 0x51, 0xe8, 0x9d, 0xd7, - 0x97, 0x74, 0x9a, 0x07, 0xd9, 0xa8, 0xb5, 0x41, 0x8a, 0x86, 0xab, 0x51, 0xeb, 0x41, 0x79, 0x0d, - 0xec, 0x23, 0xe0, 0x22, 0x14, 0x12, 0x12, 0xf9, 0x94, 0x45, 0x83, 0x18, 0xa6, 0x30, 0x64, 0x9a, - 0x1b, 0x3f, 0x44, 0xeb, 0x31, 0x1b, 0x24, 0xf2, 0x49, 0x2a, 0x43, 0x96, 0x88, 0xfa, 0xf2, 0x4e, - 0xa5, 0xb3, 0xe6, 0x6e, 0x66, 0xa3, 0xd6, 0x7a, 0xaf, 0xa0, 0x27, 0x53, 0x5e, 0xf8, 0x10, 0x6d, - 0xd3, 0x28, 0x62, 0x3f, 0xe4, 0x01, 0x0e, 0x7e, 0x4c, 0x69, 0xa2, 0x5a, 0x55, 0x5f, 0xd9, 0xb1, - 0x3a, 0x55, 0xb7, 0x9e, 0x8d, 0x5a, 0xdb, 0xdd, 0x39, 0x76, 0x32, 0x17, 0x85, 0xbf, 0x46, 0x5b, - 0x43, 0xad, 0x72, 0xc3, 0xc4, 0x0f, 0x93, 0xa0, 0xc7, 0x7c, 0xa8, 0xaf, 0xea, 0xa2, 0xef, 0x66, - 0xa3, 0xd6, 0xd6, 0xd3, 0x59, 0xe3, 0xd5, 0x3c, 0x25, 0x29, 0x93, 0xe0, 0xef, 0xd1, 0x96, 0x8e, - 0x08, 0xfe, 0x09, 0x4b, 0x59, 0xc4, 0x82, 0x10, 0x44, 0xbd, 0xaa, 0xe7, 0xd7, 0x29, 0xce, 0x4f, - 0xb5, 0x4e, 0x2d, 0x92, 0xf1, 0x3a, 0x3f, 0x86, 0x08, 0x3c, 0xc9, 0xf8, 0x09, 0xf0, 0xd8, 0xfd, - 0xc0, 0xcc, 0x6b, 0xab, 0x3b, 0x4b, 0x45, 0xca, 0xec, 0x8d, 0xcf, 0xd0, 0x9d, 0x99, 0x81, 0xe3, - 0x4d, 0x54, 0xe9, 0xc3, 0xb9, 0x5e, 0xe9, 0x35, 0xa2, 0x7e, 0xe2, 0x6d, 0xb4, 0x3c, 0xa4, 0xd1, - 0x00, 0xf2, 0x0d, 0x24, 0xb9, 0xf0, 0xe9, 0xe2, 0xbe, 0xd5, 0xfe, 0xc3, 0x42, 0x9b, 0xc5, 0xed, - 0x39, 0x0c, 0x85, 0xc4, 0xdf, 0x94, 0x0e, 0xc3, 0x7e, 0xbd, 0xc3, 0x50, 0x68, 0x7d, 0x16, 0x9b, - 0xa6, 0x86, 0xea, 0xb5, 0xa6, 0x70, 0x14, 0x3d, 0xb4, 0x1c, 0x4a, 0x88, 0x45, 0x7d, 0xb1, 0xdc, - 0x98, 0xff, 0x5b, 0x6c, 0x77, 0xc3, 0x90, 0x2e, 0x7f, 0xa1, 0xe0, 0x24, 0x67, 0x69, 0xff, 0xbe, - 0x88, 0x36, 0xf3, 0xe1, 0x74, 0xa5, 0xa4, 0xde, 0x59, 0x0c, 0x89, 0x7c, 0x0b, 0xa7, 0x4d, 0xd0, - 0x92, 0x48, 0xc1, 0xd3, 0x1d, 0x9d, 0x66, 0x2f, 0x15, 0x31, 0x9b, 0xdd, 0x71, 0x0a, 0x9e, 0xbb, - 0x6e, 0xd8, 0x97, 0x94, 0x44, 0x34, 
0x17, 0x7e, 0x86, 0x56, 0x84, 0xa4, 0x72, 0xa0, 0x6e, 0x5e, - 0xb1, 0xee, 0xdd, 0x8a, 0x55, 0x23, 0xdd, 0x77, 0x0c, 0xef, 0x4a, 0x2e, 0x13, 0xc3, 0xd8, 0xfe, - 0xd3, 0x42, 0xdb, 0xb3, 0x90, 0xb7, 0x30, 0xec, 0xaf, 0xa6, 0x87, 0x7d, 0xef, 0x36, 0x15, 0xdd, - 0x30, 0xf0, 0xe7, 0xe8, 0xbd, 0x52, 0xed, 0x6c, 0xc0, 0x3d, 0x50, 0xcf, 0x44, 0x3a, 0xf3, 0x18, - 0x3d, 0xa6, 0x31, 0xe4, 0x97, 0x90, 0x3f, 0x13, 0x47, 0x73, 0xec, 0x64, 0x2e, 0xaa, 0xfd, 0xd7, - 0x9c, 0x8e, 0xa9, 0x61, 0xe1, 0x7b, 0xa8, 0x4a, 0xb5, 0x06, 0xb8, 0xa1, 0x1e, 0x77, 0xa0, 0x6b, - 0xf4, 0x64, 0xec, 0xa1, 0x87, 0xaa, 0xd3, 0x33, 0xab, 0x72, 0xbb, 0xa1, 0x6a, 0x64, 0x61, 0xa8, - 0x5a, 0x26, 0x86, 0x51, 0x65, 0x92, 0x30, 0x3f, 0x2f, 0xb2, 0x32, 0x9d, 0xc9, 0x63, 0xa3, 0x27, - 0x63, 0x8f, 0xf6, 0xbf, 0x95, 0x39, 0x9d, 0xd3, 0xdb, 0x51, 0x28, 0xc9, 0xd7, 0x25, 0x55, 0x4b, - 0x25, 0xf9, 0xe3, 0x92, 0x7c, 0xfc, 0x9b, 0x85, 0x30, 0x1d, 0x53, 0xf4, 0xae, 0xb7, 0x27, 0x1f, - 0xf1, 0x97, 0xb7, 0x5f, 0x5a, 0xbb, 0x5b, 0x22, 0xcb, 0x3f, 0x5d, 0x0d, 0x93, 0x04, 0x2e, 0x3b, - 0x90, 0x39, 0x19, 0xe0, 0x10, 0xd5, 0x72, 0xed, 0x01, 0xe7, 0x8c, 0x9b, 0x2b, 0xfa, 0xf0, 0xd5, - 0x09, 0x69, 0x77, 0xb7, 0xa9, 0x3e, 0xca, 0xdd, 0x09, 0xfe, 0x6a, 0xd4, 0xaa, 0x15, 0xec, 0xa4, - 0xc8, 0xad, 0x42, 0xf9, 0x30, 0x09, 0xb5, 0xf4, 0x06, 0xa1, 0x3e, 0x87, 0x9b, 0x43, 0x15, 0xb8, - 0x1b, 0x07, 0xe8, 0xfd, 0x1b, 0x1a, 0x74, 0xab, 0xa7, 0xfe, 0x67, 0x0b, 0x15, 0x63, 0xe0, 0x43, - 0xb4, 0xa4, 0xfe, 0x2e, 0x99, 0xa3, 0xbf, 0xfb, 0x7a, 0x47, 0x7f, 0x12, 0xc6, 0x30, 0x79, 0xbb, - 0x94, 0x44, 0x34, 0x0b, 0xfe, 0x08, 0xad, 0xc6, 0x20, 0x04, 0x0d, 0x4c, 0x64, 0xf7, 0x8e, 0x71, - 0x5a, 0xed, 0xe5, 0x6a, 0x72, 0x6d, 0x77, 0xef, 0x5f, 0x5c, 0x36, 0x17, 0x5e, 0x5c, 0x36, 0x17, - 0x5e, 0x5e, 0x36, 0x17, 0x7e, 0xca, 0x9a, 0xd6, 0x45, 0xd6, 0xb4, 0x5e, 0x64, 0x4d, 0xeb, 0x65, - 0xd6, 0xb4, 0xfe, 0xce, 0x9a, 0xd6, 0x2f, 0xff, 0x34, 0x17, 0x9e, 0xad, 0x9a, 0xbe, 0xfd, 0x17, - 0x00, 0x00, 0xff, 0xff, 0xb4, 0x63, 0x7e, 0xa7, 0x0b, 0x0b, 0x00, 0x00, + // 1216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xc6, 0x4e, 0x9c, 0x8c, 0x93, 0x36, 0x59, 0x22, 0x30, 0x3e, 0xd8, 0x91, 0x11, 0x34, + 0xad, 0xda, 0x75, 0x1b, 0x15, 0x14, 0x55, 0xe2, 0x60, 0x27, 0x91, 0x70, 0x1b, 0x27, 0x66, 0x12, + 0x55, 0xa8, 0xe2, 0xc0, 0x64, 0xf7, 0xc5, 0x59, 0xec, 0xdd, 0xd9, 0xce, 0x8e, 0x0d, 0xbe, 0x71, + 0x82, 0x23, 0x88, 0x03, 0x9f, 0x80, 0xaf, 0x00, 0x12, 0x5c, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, + 0x8b, 0x98, 0x6f, 0x11, 0x71, 0x40, 0x33, 0x3b, 0xf6, 0xee, 0xfa, 0x4f, 0xe3, 0x70, 0xf0, 0xcd, + 0xf3, 0xde, 0xfb, 0xfd, 0xde, 0xdf, 0x79, 0xb3, 0x46, 0xbb, 0x8d, 0x1d, 0xdf, 0xb0, 0x69, 0xb1, + 0xd1, 0x3a, 0x05, 0xe6, 0x02, 0x07, 0xbf, 0xd8, 0x06, 0xd7, 0xa2, 0xac, 0xa8, 0x14, 0xc4, 0xb3, + 0x8b, 0x3e, 0xa7, 0x8c, 0xd4, 0xa1, 0xd8, 0x7e, 0x74, 0x0a, 0x9c, 0x3c, 0x2a, 0xd6, 0xc1, 0x05, + 0x46, 0x38, 0x58, 0x86, 0xc7, 0x28, 0xa7, 0x7a, 0x36, 0xb0, 0x35, 0x88, 0x67, 0x1b, 0xca, 0xd6, + 0x50, 0xb6, 0xd9, 0x07, 0x75, 0x9b, 0x9f, 0xb7, 0x4e, 0x0d, 0x93, 0x3a, 0xc5, 0x3a, 0xad, 0xd3, + 0xa2, 0x84, 0x9c, 0xb6, 0xce, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x80, 0x2a, 0x5b, 0x88, 0xb8, 0x35, + 0x29, 0x13, 0x3e, 0x87, 0xdd, 0x65, 0x1f, 0x87, 0x36, 0x0e, 0x31, 0xcf, 0x6d, 0x17, 0x58, 0xa7, + 0xe8, 0x35, 0xea, 0x42, 0xe0, 0x17, 0x1d, 0xe0, 0x64, 0x1c, 0xaa, 0x38, 0x09, 0xc5, 0x5a, 0x2e, + 0xb7, 0x1d, 0x18, 0x01, 0x7c, 0x74, 0x1d, 0xc0, 0x37, 0xcf, 0xc1, 0x21, 0xc3, 0xb8, 0xc2, 0xef, + 0x1a, 0x5a, 0xde, 0x3d, 0xae, 0xec, 
0x31, 0xbb, 0x0d, 0x4c, 0xff, 0x02, 0x2d, 0x89, 0x88, 0x2c, + 0xc2, 0x49, 0x46, 0xdb, 0xd4, 0xb6, 0xd2, 0xdb, 0x0f, 0x8d, 0xb0, 0x5c, 0x03, 0x62, 0xc3, 0x6b, + 0xd4, 0x85, 0xc0, 0x37, 0x84, 0xb5, 0xd1, 0x7e, 0x64, 0x1c, 0x9d, 0x7e, 0x09, 0x26, 0xaf, 0x02, + 0x27, 0x65, 0xfd, 0xa2, 0x9b, 0x9f, 0xeb, 0x75, 0xf3, 0x28, 0x94, 0xe1, 0x01, 0xab, 0xfe, 0x0c, + 0x25, 0x7d, 0x0f, 0xcc, 0xcc, 0xbc, 0x64, 0xbf, 0x6b, 0x4c, 0x6e, 0x86, 0x31, 0x08, 0xeb, 0xd8, + 0x03, 0xb3, 0xbc, 0xa2, 0x68, 0x93, 0xe2, 0x84, 0x25, 0x49, 0xe1, 0x37, 0x0d, 0xad, 0x0e, 0xac, + 0x0e, 0x6c, 0x9f, 0xeb, 0x9f, 0x8f, 0x24, 0x60, 0x4c, 0x97, 0x80, 0x40, 0xcb, 0xf0, 0xd7, 0x94, + 0x9f, 0xa5, 0xbe, 0x24, 0x12, 0xfc, 0x53, 0xb4, 0x60, 0x73, 0x70, 0xfc, 0xcc, 0xfc, 0x66, 0x62, + 0x2b, 0xbd, 0xfd, 0xfe, 0x54, 0xd1, 0x97, 0x57, 0x15, 0xe3, 0x42, 0x45, 0x60, 0x71, 0x40, 0x51, + 0xf8, 0x2e, 0x1a, 0xbb, 0xc8, 0x49, 0x7f, 0x82, 0x6e, 0x11, 0xce, 0x89, 0x79, 0x8e, 0xe1, 0x65, + 0xcb, 0x66, 0x60, 0xc9, 0x0c, 0x96, 0xca, 0x7a, 0xaf, 0x9b, 0xbf, 0x55, 0x8a, 0x69, 0xf0, 0x90, + 0xa5, 0xc0, 0x7a, 0xd4, 0xaa, 0xb8, 0x67, 0xf4, 0xc8, 0xad, 0xd2, 0x96, 0xcb, 0x65, 0x81, 0x15, + 0xb6, 0x16, 0xd3, 0xe0, 0x21, 0xcb, 0xc2, 0xaf, 0x1a, 0x4a, 0xed, 0x1e, 0x57, 0x0e, 0xa9, 0x05, + 0x33, 0x18, 0x80, 0x4a, 0x6c, 0x00, 0xee, 0x5c, 0x53, 0x42, 0x11, 0xd4, 0xc4, 0xf6, 0x7f, 0x1f, + 0x94, 0x50, 0xd8, 0xa8, 0xf9, 0xdd, 0x44, 0x49, 0x97, 0x38, 0x20, 0x43, 0x5f, 0x0e, 0x31, 0x87, + 0xc4, 0x01, 0x2c, 0x35, 0xfa, 0x07, 0x68, 0xd1, 0xa5, 0x16, 0x54, 0xf6, 0x64, 0x00, 0xcb, 0xe5, + 0x5b, 0xca, 0x66, 0xf1, 0x50, 0x4a, 0xb1, 0xd2, 0xea, 0x8f, 0xd1, 0x0a, 0xa7, 0x1e, 0x6d, 0xd2, + 0x7a, 0xe7, 0x19, 0x74, 0xfc, 0x4c, 0x62, 0x33, 0xb1, 0xb5, 0x5c, 0x5e, 0xeb, 0x75, 0xf3, 0x2b, + 0x27, 0x11, 0x39, 0x8e, 0x59, 0x15, 0x7e, 0xd1, 0x50, 0x5a, 0x45, 0x34, 0x83, 0x71, 0xfc, 0x24, + 0x3e, 0x8e, 0xef, 0x4d, 0x51, 0xcb, 0x09, 0xc3, 0x68, 0x0e, 0xc2, 0x96, 0x93, 0x78, 0x82, 0x52, + 0x96, 0x2c, 0xa8, 0x9f, 0xd1, 0x24, 0xf5, 0xdd, 0x29, 0xa8, 0xd5, 0xb4, 0xdf, 0x56, 0x0e, 0x52, + 0xc1, 0xd9, 0xc7, 0x7d, 0xaa, 0xc2, 0x8f, 0x8b, 0x68, 0xe5, 0x38, 0xc0, 0xee, 0x36, 0x89, 0xef, + 0xcf, 0x60, 0xd8, 0x3e, 0x44, 0x69, 0x8f, 0xd1, 0xb6, 0xed, 0xdb, 0xd4, 0x05, 0xa6, 0x5a, 0xfe, + 0x96, 0x82, 0xa4, 0x6b, 0xa1, 0x0a, 0x47, 0xed, 0xf4, 0x26, 0x42, 0x1e, 0x61, 0xc4, 0x01, 0x2e, + 0x4a, 0x90, 0x90, 0x25, 0xd8, 0x79, 0x53, 0x09, 0xa2, 0x69, 0x19, 0xb5, 0x01, 0x74, 0xdf, 0xe5, + 0xac, 0x13, 0x86, 0x18, 0x2a, 0x70, 0x84, 0x5f, 0x6f, 0xa0, 0x55, 0x06, 0x66, 0x93, 0xd8, 0x4e, + 0x8d, 0x36, 0x6d, 0xb3, 0x93, 0x49, 0xca, 0x30, 0xf7, 0x7b, 0xdd, 0xfc, 0x2a, 0x8e, 0x2a, 0xae, + 0xba, 0xf9, 0x87, 0xa3, 0x2f, 0x8e, 0x51, 0x03, 0xe6, 0xdb, 0x3e, 0x07, 0x97, 0x3f, 0xa7, 0xcd, + 0x96, 0x03, 0x31, 0x0c, 0x8e, 0x73, 0x8b, 0xb9, 0x76, 0xc4, 0xad, 0x3f, 0xf2, 0xb8, 0x4d, 0x5d, + 0x3f, 0xb3, 0x10, 0xce, 0x75, 0x35, 0x22, 0xc7, 0x31, 0x2b, 0xfd, 0x00, 0x6d, 0x90, 0x66, 0x93, + 0x7e, 0x15, 0x38, 0xd8, 0xff, 0xda, 0x23, 0xae, 0x28, 0x55, 0x66, 0x51, 0x2e, 0x99, 0x4c, 0xaf, + 0x9b, 0xdf, 0x28, 0x8d, 0xd1, 0xe3, 0xb1, 0x28, 0xfd, 0x33, 0xb4, 0xde, 0x96, 0xa2, 0xb2, 0xed, + 0x5a, 0xb6, 0x5b, 0xaf, 0x52, 0x0b, 0x32, 0x29, 0x99, 0xf4, 0xbd, 0x5e, 0x37, 0xbf, 0xfe, 0x7c, + 0x58, 0x79, 0x35, 0x4e, 0x88, 0x47, 0x49, 0xf4, 0x97, 0x68, 0x5d, 0x7a, 0x04, 0x4b, 0x5d, 0x52, + 0x1b, 0xfc, 0xcc, 0x92, 0xec, 0xdf, 0x56, 0xb4, 0x7f, 0xa2, 0x74, 0x62, 0x90, 0xfa, 0x57, 0xf9, + 0x18, 0x9a, 0x60, 0x72, 0xca, 0x4e, 0x80, 0x39, 0xe5, 0x77, 0x55, 0xbf, 0xd6, 0x4b, 0xc3, 0x54, + 0x78, 0x94, 0x3d, 0xfb, 0x31, 0xba, 0x3d, 0xd4, 0x70, 0x7d, 
0x0d, 0x25, 0x1a, 0xd0, 0x09, 0x96, + 0x10, 0x16, 0x3f, 0xf5, 0x0d, 0xb4, 0xd0, 0x26, 0xcd, 0x16, 0x04, 0x13, 0x88, 0x83, 0xc3, 0x93, + 0xf9, 0x1d, 0xad, 0xf0, 0x87, 0x86, 0xd6, 0xa2, 0xd3, 0x33, 0x83, 0xb5, 0x51, 0x8d, 0xaf, 0x8d, + 0xad, 0x69, 0x07, 0x7b, 0xc2, 0xee, 0xf8, 0x79, 0x1e, 0xad, 0x05, 0xcd, 0x09, 0xde, 0x28, 0x07, + 0x5c, 0x3e, 0x83, 0xab, 0x8d, 0x63, 0xef, 0xc8, 0xc3, 0x37, 0x25, 0x31, 0x1c, 0xdd, 0xa4, 0x07, + 0x45, 0x7f, 0x81, 0x16, 0x7d, 0x4e, 0x78, 0x4b, 0xdc, 0x79, 0xc1, 0xba, 0x7d, 0x23, 0x56, 0x89, + 0x0c, 0x1f, 0x94, 0xe0, 0x8c, 0x15, 0x63, 0xe1, 0x4f, 0x0d, 0x6d, 0x0c, 0x43, 0x66, 0xd0, 0xec, + 0x4f, 0xe3, 0xcd, 0xbe, 0x7f, 0x93, 0x8c, 0x26, 0x34, 0xfc, 0x0c, 0xbd, 0x3d, 0x92, 0x3b, 0x6d, + 0x31, 0x13, 0xc4, 0x9a, 0xf0, 0x86, 0x96, 0xd1, 0x61, 0xf8, 0x1c, 0xcb, 0x35, 0x51, 0x1b, 0xa3, + 0xc7, 0x63, 0x51, 0x85, 0xbf, 0xc6, 0x54, 0x4c, 0x3e, 0x4f, 0xf7, 0xd1, 0x52, 0xf0, 0xf9, 0x03, + 0x4c, 0x51, 0x0f, 0x2a, 0x50, 0x52, 0x72, 0x3c, 0xb0, 0x90, 0x4d, 0x95, 0xe1, 0xa9, 0x51, 0xb9, + 0x59, 0x53, 0x25, 0x32, 0xd2, 0x54, 0x79, 0xc6, 0x8a, 0x51, 0x44, 0x22, 0xbe, 0x17, 0x64, 0x92, + 0x89, 0x78, 0x24, 0x87, 0x4a, 0x8e, 0x07, 0x16, 0x85, 0x7f, 0x13, 0x63, 0x2a, 0x27, 0xa7, 0x23, + 0x92, 0x52, 0xff, 0xab, 0x6f, 0x38, 0x25, 0x6b, 0x90, 0x92, 0xa5, 0xff, 0xa4, 0x21, 0x9d, 0x0c, + 0x28, 0xaa, 0xfd, 0xe9, 0x09, 0x5a, 0xfc, 0xf4, 0xe6, 0x43, 0x6b, 0x94, 0x46, 0xc8, 0x82, 0xa7, + 0x2b, 0xab, 0x82, 0xd0, 0x47, 0x0d, 0xf0, 0x98, 0x08, 0x74, 0x1b, 0xa5, 0x03, 0xe9, 0x3e, 0x63, + 0x94, 0xa9, 0x5b, 0x74, 0xe7, 0xfa, 0x80, 0xa4, 0x79, 0x39, 0x27, 0x1e, 0xe5, 0x52, 0x88, 0xbf, + 0xea, 0xe6, 0xd3, 0x11, 0x3d, 0x8e, 0x72, 0x0b, 0x57, 0x16, 0x84, 0xae, 0x92, 0xff, 0xc3, 0xd5, + 0x1e, 0x4c, 0x76, 0x15, 0xe1, 0xce, 0xee, 0xa3, 0x77, 0x26, 0x14, 0xe8, 0x46, 0xab, 0xfe, 0x5b, + 0x0d, 0x45, 0x7d, 0xe8, 0x07, 0x28, 0x29, 0xfe, 0x99, 0xa9, 0x4b, 0x7f, 0x6f, 0xba, 0x4b, 0x7f, + 0x62, 0x3b, 0x10, 0xee, 0x2e, 0x71, 0xc2, 0x92, 0x45, 0xbf, 0x8b, 0x52, 0x0e, 0xf8, 0x3e, 0xa9, + 0x2b, 0xcf, 0xe1, 0x87, 0x58, 0x35, 0x10, 0xe3, 0xbe, 0xbe, 0xfc, 0xe0, 0xe2, 0x32, 0x37, 0xf7, + 0xea, 0x32, 0x37, 0xf7, 0xfa, 0x32, 0x37, 0xf7, 0x4d, 0x2f, 0xa7, 0x5d, 0xf4, 0x72, 0xda, 0xab, + 0x5e, 0x4e, 0x7b, 0xdd, 0xcb, 0x69, 0x7f, 0xf7, 0x72, 0xda, 0x0f, 0xff, 0xe4, 0xe6, 0x5e, 0xa4, + 0x54, 0xdd, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x78, 0xdc, 0x5e, 0x79, 0x76, 0x0f, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index db1f302a05..6d9fe90499 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -29,6 +29,143 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the +// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically +// creates a CSIDriver object representing the driver. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +message CSIDriver { + // Standard object metadata. 
+ // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the CSI Driver. + optional CSIDriverSpec spec = 2; +} + +// CSIDriverList is a collection of CSIDriver objects. +message CSIDriverList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSIDriver + repeated CSIDriver items = 2; +} + +// CSIDriverSpec is the specification of a CSIDriver. +message CSIDriverSpec { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. + // +optional + optional bool attachRequired = 1; + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // +optional + optional bool podInfoOnMount = 2; +} + +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +message CSINode { + // metadata.name must be the Kubernetes node name. 
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of CSINode + optional CSINodeSpec spec = 2; +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +message CSINodeDriver { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + optional string name = 1; + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + optional string nodeID = 2; + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + repeated string topologyKeys = 3; +} + +// CSINodeList is a collection of CSINode objects. +message CSINodeList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSINode + repeated CSINode items = 2; +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +message CSINodeSpec { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + repeated CSINodeDriver drivers = 1; +} + // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. // @@ -178,7 +315,7 @@ message VolumeError { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. 
// +optional optional string message = 2; diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index 06b0f3d529..c270ace57d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -49,6 +49,12 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachment{}, &VolumeAttachmentList{}, + + &CSIDriver{}, + &CSIDriverList{}, + + &CSINode{}, + &CSINodeList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 5702c21bcc..4900579120 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -204,8 +204,165 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive + // This string may be logged, so it should not contain sensitive // information. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the +// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically +// creates a CSIDriver object representing the driver. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +type CSIDriver struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the CSI Driver. + Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriverList is a collection of CSIDriver objects. +type CSIDriverList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSIDriver + Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CSIDriverSpec is the specification of a CSIDriver. 
+type CSIDriverSpec struct { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. + // +optional + AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // +optional + PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +type CSINode struct { + metav1.TypeMeta `json:",inline"` + + // metadata.name must be the Kubernetes node name. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of CSINode + Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeSpec struct { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +type CSINodeDriver struct { + // This is the name of the CSI driver that this object refers to. 
+ // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINodeList is a collection of CSINode objects. +type CSINodeList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSINode + Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 834553e1a8..ec741ecf70 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,76 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CSIDriver = map[string]string{ + "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", + "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. 
The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the CSI Driver.", +} + +func (CSIDriver) SwaggerDoc() map[string]string { + return map_CSIDriver +} + +var map_CSIDriverList = map[string]string{ + "": "CSIDriverList is a collection of CSIDriver objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of CSIDriver", +} + +func (CSIDriverList) SwaggerDoc() map[string]string { + return map_CSIDriverList +} + +var map_CSIDriverSpec = map[string]string{ + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID)", +} + +func (CSIDriverSpec) SwaggerDoc() map[string]string { + return map_CSIDriverSpec +} + +var map_CSINode = map[string]string{ + "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "metadata": "metadata.name must be the Kubernetes node name.", + "spec": "spec is the specification of CSINode", +} + +func (CSINode) SwaggerDoc() map[string]string { + return map_CSINode +} + +var map_CSINodeDriver = map[string]string{ + "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "name": "This is the name of the CSI driver that this object refers to. 
This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", +} + +func (CSINodeDriver) SwaggerDoc() map[string]string { + return map_CSINodeDriver +} + +var map_CSINodeList = map[string]string{ + "": "CSINodeList is a collection of CSINode objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "items is the list of CSINode", +} + +func (CSINodeList) SwaggerDoc() map[string]string { + return map_CSINodeList +} + +var map_CSINodeSpec = map[string]string{ + "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", +} + +func (CSINodeSpec) SwaggerDoc() map[string]string { + return map_CSINodeSpec +} + var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -109,7 +179,7 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. 
This string may be logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 8096dba9be..022815f183 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -25,6 +25,196 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. +func (in *CSIDriver) DeepCopy() *CSIDriver { + if in == nil { + return nil + } + out := new(CSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. +func (in *CSIDriverList) DeepCopy() *CSIDriverList { + if in == nil { + return nil + } + out := new(CSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { + *out = *in + if in.AttachRequired != nil { + in, out := &in.AttachRequired, &out.AttachRequired + *out = new(bool) + **out = **in + } + if in.PodInfoOnMount != nil { + in, out := &in.PodInfoOnMount, &out.PodInfoOnMount + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. +func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { + if in == nil { + return nil + } + out := new(CSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINode) DeepCopyInto(out *CSINode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. 
+func (in *CSINode) DeepCopy() *CSINode { + if in == nil { + return nil + } + out := new(CSINode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { + *out = *in + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. +func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { + if in == nil { + return nil + } + out := new(CSINodeDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSINode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. +func (in *CSINodeList) DeepCopy() *CSINodeList { + if in == nil { + return nil + } + out := new(CSINodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSINodeDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. +func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { + if in == nil { + return nil + } + out := new(CSINodeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go index 92cad7d9b7..964a6190ba 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -23,8 +23,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetCRDCondition sets the status condition. It either overwrites the existing one or -// creates a new one +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + +// SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one. 
func SetCRDCondition(crd *CustomResourceDefinition, newCondition CustomResourceDefinitionCondition) { existingCondition := FindCRDCondition(crd, newCondition.Type) if existingCondition == nil { @@ -53,7 +54,7 @@ func RemoveCRDCondition(crd *CustomResourceDefinition, conditionType CustomResou crd.Status.Conditions = newConditions } -// FindCRDCondition returns the condition you're looking for or nil +// FindCRDCondition returns the condition you're looking for or nil. func FindCRDCondition(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) *CustomResourceDefinitionCondition { for i := range crd.Status.Conditions { if crd.Status.Conditions[i].Type == conditionType { @@ -64,17 +65,17 @@ func FindCRDCondition(crd *CustomResourceDefinition, conditionType CustomResourc return nil } -// IsCRDConditionTrue indicates if the condition is present and strictly true +// IsCRDConditionTrue indicates if the condition is present and strictly true. func IsCRDConditionTrue(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) bool { return IsCRDConditionPresentAndEqual(crd, conditionType, ConditionTrue) } -// IsCRDConditionFalse indicates if the condition is present and false true +// IsCRDConditionFalse indicates if the condition is present and false. func IsCRDConditionFalse(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType) bool { return IsCRDConditionPresentAndEqual(crd, conditionType, ConditionFalse) } -// IsCRDConditionPresentAndEqual indicates if the condition is present and equal to the arg +// IsCRDConditionPresentAndEqual indicates if the condition is present and equal to the given status. func IsCRDConditionPresentAndEqual(crd *CustomResourceDefinition, conditionType CustomResourceDefinitionConditionType, status ConditionStatus) bool { for _, condition := range crd.Status.Conditions { if condition.Type == conditionType { @@ -84,7 +85,7 @@ func IsCRDConditionPresentAndEqual(crd *CustomResourceDefinition, conditionType return false } -// IsCRDConditionEquivalent returns true if the lhs and rhs are equivalent except for times +// IsCRDConditionEquivalent returns true if the lhs and rhs are equivalent except for times. func IsCRDConditionEquivalent(lhs, rhs *CustomResourceDefinitionCondition) bool { if lhs == nil && rhs == nil { return true @@ -96,7 +97,7 @@ func IsCRDConditionEquivalent(lhs, rhs *CustomResourceDefinitionCondition) bool return lhs.Message == rhs.Message && lhs.Reason == rhs.Reason && lhs.Status == rhs.Status && lhs.Type == rhs.Type } -// CRDHasFinalizer returns true if the finalizer is in the list +// CRDHasFinalizer returns true if the finalizer is in the list. func CRDHasFinalizer(crd *CustomResourceDefinition, needle string) bool { for _, finalizer := range crd.Finalizers { if finalizer == needle { @@ -107,7 +108,7 @@ func CRDHasFinalizer(crd *CustomResourceDefinition, needle string) bool { return false } -// CRDRemoveFinalizer removes the finalizer if present +// CRDRemoveFinalizer removes the finalizer if present. func CRDRemoveFinalizer(crd *CustomResourceDefinition, needle string) { newFinalizers := []string{} for _, finalizer := range crd.Finalizers { @@ -118,7 +119,7 @@ func CRDRemoveFinalizer(crd *CustomResourceDefinition, needle string) { crd.Finalizers = newFinalizers } -// HasServedCRDVersion returns true if `version` is in the list of CRD's versions and the Served flag is set. 
+// HasServedCRDVersion returns true if the given version is in the list of CRD's versions and the Served flag is set. func HasServedCRDVersion(crd *CustomResourceDefinition, version string) bool { for _, v := range crd.Spec.Versions { if v.Name == version { @@ -139,6 +140,7 @@ func GetCRDStorageVersion(crd *CustomResourceDefinition) (string, error) { return "", fmt.Errorf("invalid CustomResourceDefinition, no storage version") } +// IsStoredVersion returns whether the given version is the storage version of the CRD. func IsStoredVersion(crd *CustomResourceDefinition, version string) bool { for _, v := range crd.Status.StoredVersions { if version == v { @@ -147,3 +149,108 @@ func IsStoredVersion(crd *CustomResourceDefinition, version string) bool { } return false } + +// GetSchemaForVersion returns the validation schema for the given version or nil. +func GetSchemaForVersion(crd *CustomResourceDefinition, version string) (*CustomResourceValidation, error) { + if !HasPerVersionSchema(crd.Spec.Versions) { + return crd.Spec.Validation, nil + } + if crd.Spec.Validation != nil { + return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version schemas must be mutual exclusive", crd.Name, version) + } + for _, v := range crd.Spec.Versions { + if version == v.Name { + return v.Schema, nil + } + } + return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) +} + +// GetSubresourcesForVersion returns the subresources for given version or nil. +func GetSubresourcesForVersion(crd *CustomResourceDefinition, version string) (*CustomResourceSubresources, error) { + if !HasPerVersionSubresources(crd.Spec.Versions) { + return crd.Spec.Subresources, nil + } + if crd.Spec.Subresources != nil { + return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version subresources must be mutual exclusive", crd.Name, version) + } + for _, v := range crd.Spec.Versions { + if version == v.Name { + return v.Subresources, nil + } + } + return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) +} + +// GetColumnsForVersion returns the columns for given version or nil. +// NOTE: the newly logically-defaulted columns is not pointing to the original CRD object. +// One cannot mutate the original CRD columns using the logically-defaulted columns. Please iterate through +// the original CRD object instead. +func GetColumnsForVersion(crd *CustomResourceDefinition, version string) ([]CustomResourceColumnDefinition, error) { + if !HasPerVersionColumns(crd.Spec.Versions) { + return serveDefaultColumnsIfEmpty(crd.Spec.AdditionalPrinterColumns), nil + } + if len(crd.Spec.AdditionalPrinterColumns) > 0 { + return nil, fmt.Errorf("malformed CustomResourceDefinition %s version %s: top-level and per-version additionalPrinterColumns must be mutual exclusive", crd.Name, version) + } + for _, v := range crd.Spec.Versions { + if version == v.Name { + return serveDefaultColumnsIfEmpty(v.AdditionalPrinterColumns), nil + } + } + return nil, fmt.Errorf("version %s not found in CustomResourceDefinition: %v", version, crd.Name) +} + +// HasPerVersionSchema returns true if a CRD uses per-version schema. +func HasPerVersionSchema(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if v.Schema != nil { + return true + } + } + return false +} + +// HasPerVersionSubresources returns true if a CRD uses per-version subresources. 
+func HasPerVersionSubresources(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if v.Subresources != nil { + return true + } + } + return false +} + +// HasPerVersionColumns returns true if a CRD uses per-version columns. +func HasPerVersionColumns(versions []CustomResourceDefinitionVersion) bool { + for _, v := range versions { + if len(v.AdditionalPrinterColumns) > 0 { + return true + } + } + return false +} + +// serveDefaultColumnsIfEmpty applies logically defaulting to columns, if the input columns is empty. +// NOTE: in this way, the newly logically-defaulted columns is not pointing to the original CRD object. +// One cannot mutate the original CRD columns using the logically-defaulted columns. Please iterate through +// the original CRD object instead. +func serveDefaultColumnsIfEmpty(columns []CustomResourceColumnDefinition) []CustomResourceColumnDefinition { + if len(columns) > 0 { + return columns + } + return []CustomResourceColumnDefinition{ + {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"], JSONPath: ".metadata.creationTimestamp"}, + } +} + +// HasVersionServed returns true if given CRD has given version served. +func HasVersionServed(crd *CustomResourceDefinition, version string) bool { + for _, v := range crd.Spec.Versions { + if !v.Served || v.Name != version { + continue + } + return true + } + return false +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index fcbd8bd197..21873d4602 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -84,6 +84,15 @@ type CustomResourceConversion struct { // `webhookClientConfig` is the instructions for how to call the webhook if strategy is `Webhook`. WebhookClientConfig *WebhookClientConfig + + // ConversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, conversion will fail for this object. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. 
+ // +optional + ConversionReviewVersions []string } // WebhookClientConfig contains the information to make a TLS diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go index 79f34e8bf6..af78c34fb6 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go @@ -23,6 +23,7 @@ type JSONSchemaProps struct { Ref *string Description string Type string + Nullable bool Format string Title string Default *JSON diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index 5aae97cf1a..7bea2d6988 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -68,6 +68,9 @@ func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) Strategy: NoneConverter, } } + if obj.Conversion.Strategy == WebhookConverter && len(obj.Conversion.ConversionReviewVersions) == 0 { + obj.Conversion.ConversionReviewVersions = []string{SchemeGroupVersion.Version} + } } // hasPerVersionColumns returns true if a CRD uses per-version columns. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go index acd09aca28..3f631dd4e6 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go @@ -15,6 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package // +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions // +k8s:defaulter-gen=TypeMeta // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index ef2dfe16aa..ea46688ec3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -416,6 +416,21 @@ func (m *CustomResourceConversion) MarshalTo(dAtA []byte) (int, error) { } i += n4 } + if len(m.ConversionReviewVersions) > 0 { + for _, s := range m.ConversionReviewVersions { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } return i, nil } @@ -1406,6 +1421,16 @@ func (m *JSONSchemaProps) MarshalTo(dAtA []byte) (int, error) { } i += n30 } + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x2 + i++ + if m.Nullable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1681,6 +1706,12 @@ func (m *CustomResourceConversion) Size() (n int) { l = m.WebhookClientConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.ConversionReviewVersions) > 0 { + for _, s := range m.ConversionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -2043,6 +2074,7 @@ func (m *JSONSchemaProps) Size() (n int) { l = m.Example.Size() n += 2 + l + sovGenerated(uint64(l)) } + n += 3 return n } @@ -2191,6 +2223,7 @@ func (this *CustomResourceConversion) String() string { s := strings.Join([]string{`&CustomResourceConversion{`, `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, `WebhookClientConfig:` + strings.Replace(fmt.Sprintf("%v", this.WebhookClientConfig), "WebhookClientConfig", "WebhookClientConfig", 1) + `,`, + `ConversionReviewVersions:` + fmt.Sprintf("%v", this.ConversionReviewVersions) + `,`, `}`, }, "") return s @@ -2436,6 +2469,7 @@ func (this *JSONSchemaProps) String() string { `Definitions:` + mapStringForDefinitions + `,`, `ExternalDocs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalDocs), "ExternalDocumentation", "ExternalDocumentation", 1) + `,`, `Example:` + strings.Replace(fmt.Sprintf("%v", this.Example), "JSON", "JSON", 1) + `,`, + `Nullable:` + fmt.Sprintf("%v", this.Nullable) + `,`, `}`, }, "") return s @@ -3205,6 +3239,35 @@ func (m *CustomResourceConversion) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConversionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConversionReviewVersions = append(m.ConversionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6511,6 +6574,26 @@ func (m *JSONSchemaProps) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 37: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nullable = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7253,177 +7336,179 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2740 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0x1c, 0x47, - 0x15, 0xf7, 0xec, 0x6a, 0xa5, 0x55, 0x4b, 0xb2, 0xa4, 0xb6, 0xe5, 0x8c, 0x85, 0xbd, 0x2b, 0xad, - 0x49, 0x4a, 0x04, 0x7b, 0x15, 0x9b, 0x84, 0x84, 0x54, 0x71, 0xd0, 0x4a, 0x4a, 0x4a, 0xc6, 0xfa, - 0xa0, 0xd7, 0x76, 0x80, 0x7c, 0xb6, 0x66, 0x7b, 0x57, 0x63, 0xcd, 0x97, 0xa7, 0x67, 0x56, 0x52, - 0x05, 0x28, 0x48, 0x2a, 0x05, 0x45, 0x01, 0xa1, 0x88, 0x2f, 0x14, 0x70, 0x00, 0x8a, 0x0b, 0x45, - 0xc1, 0x01, 0x6e, 0xf0, 0x07, 0xf8, 0x98, 0xe2, 0x94, 0xd3, 0x16, 0xde, 0xfc, 0x0b, 0x54, 0x51, - 0xa5, 0x13, 0xd5, 0x1f, 0xd3, 0x33, 0x3b, 0xbb, 0x6b, 0xab, 0xe2, 0xdd, 0x98, 0x9b, 0xe6, 0xbd, - 0xd7, 0xef, 0xf7, 0xfa, 0xf5, 0x7b, 0xaf, 0x5f, 0xbf, 0x15, 0xa8, 0xef, 0xbf, 0x44, 0xcb, 0xa6, - 0xbb, 0xbc, 0x1f, 0xee, 0x12, 0xdf, 0x21, 0x01, 0xa1, 0xcb, 0x4d, 0xe2, 0xd4, 0x5c, 0x7f, 0x59, - 0x32, 0xb0, 0x67, 0x92, 0xc3, 0x80, 0x38, 0xd4, 0x74, 0x1d, 0x7a, 0x05, 0x7b, 0x26, 0x25, 0x7e, - 0x93, 0xf8, 0xcb, 0xde, 0x7e, 0x83, 0xf1, 0x68, 0xa7, 0xc0, 0x72, 0xf3, 0xea, 0x2e, 0x09, 0xf0, - 0xd5, 0xe5, 0x06, 0x71, 0x88, 0x8f, 0x03, 0x52, 0x2b, 0x7b, 0xbe, 0x1b, 0xb8, 0xf0, 0xeb, 0x42, - 0x5d, 0xb9, 0x43, 0xfa, 0x6d, 0xa5, 0xae, 0xec, 0xed, 0x37, 0x18, 0x8f, 0x76, 0x0a, 0x94, 0xa5, - 0xba, 0xf9, 0x2b, 0x0d, 0x33, 0xd8, 0x0b, 0x77, 0xcb, 0x86, 0x6b, 0x2f, 0x37, 0xdc, 0x86, 0xbb, - 0xcc, 0xb5, 0xee, 0x86, 0x75, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x02, 0x6d, 0xfe, 0xf9, 0xd8, 0x78, - 0x1b, 0x1b, 0x7b, 0xa6, 0x43, 0xfc, 0xa3, 0xd8, 0x62, 0x9b, 0x04, 0x78, 0xb9, 0xd9, 0x65, 0xe3, - 0xfc, 0x72, 0xbf, 0x55, 0x7e, 0xe8, 0x04, 0xa6, 0x4d, 0xba, 0x16, 0x7c, 0xf5, 0x51, 0x0b, 0xa8, - 0xb1, 0x47, 0x6c, 0x9c, 0x5e, 0x57, 0x3a, 0xd6, 0xc0, 0xec, 0xaa, 0xeb, 0x34, 0x89, 0xcf, 0x76, - 0x89, 0xc8, 0xdd, 0x90, 0xd0, 0x00, 0x56, 0x40, 0x36, 0x34, 0x6b, 0xba, 0xb6, 0xa0, 0x2d, 0x8d, - 0x57, 0x9e, 0xbb, 0xdf, 0x2a, 0x9e, 0x6a, 0xb7, 0x8a, 0xd9, 0x5b, 0x1b, 0x6b, 0xc7, 0xad, 0xe2, - 0x62, 0x3f, 0xa4, 0xe0, 0xc8, 0x23, 0xb4, 0x7c, 0x6b, 0x63, 0x0d, 0xb1, 0xc5, 0xf0, 0x55, 0x30, - 0x5b, 0x23, 0xd4, 0xf4, 0x49, 0x6d, 0x65, 0x67, 0xe3, 0xb6, 0xd0, 0xaf, 0x67, 0xb8, 0xc6, 0xf3, - 0x52, 0xe3, 0xec, 0x5a, 0x5a, 0x00, 0x75, 0xaf, 0x81, 0xdf, 0x02, 0x63, 0xee, 0xee, 0x1d, 0x62, - 0x04, 0x54, 0xcf, 0x2e, 0x64, 0x97, 0x26, 0xae, 0x5d, 0x29, 0xc7, 0x27, 0xa8, 0x4c, 0xe0, 0xc7, - 0x26, 0x37, 0x5b, 0x46, 0xf8, 0x60, 0x3d, 0x3a, 0xb9, 0xca, 0xb4, 0x44, 0x1b, 0xdb, 0x16, 0x5a, - 0x50, 0xa4, 0xae, 0xf4, 0x87, 0x0c, 0x80, 0xc9, 0xcd, 0x53, 0xcf, 0x75, 0x28, 0x19, 0xc8, 0xee, - 0x29, 0x98, 0x31, 0xb8, 0xe6, 0x80, 0xd4, 0x24, 0xae, 0x9e, 0xf9, 0x2c, 0xd6, 0xeb, 0x12, 0x7f, - 0x66, 0x35, 0xa5, 0x0e, 0x75, 0x01, 0xc0, 0x9b, 0x60, 0xd4, 0x27, 0x34, 0xb4, 0x02, 0x3d, 0xbb, - 0xa0, 0x2d, 0x4d, 0x5c, 0xbb, 0xdc, 0x17, 0x8a, 0xc7, 0x37, 0x0b, 0xbe, 0x72, 0xf3, 0x6a, 0xb9, - 0x1a, 0xe0, 0x20, 0xa4, 0x95, 0xd3, 0x12, 0x69, 0x14, 0x71, 0x1d, 0x48, 0xea, 0x2a, 0xfd, 0x38, - 0x03, 0x66, 0x92, 0x5e, 0x6a, 0x9a, 0xe4, 
0x00, 0x1e, 0x80, 0x31, 0x5f, 0x04, 0x0b, 0xf7, 0xd3, - 0xc4, 0xb5, 0x9d, 0xf2, 0x63, 0xa5, 0x55, 0xb9, 0x2b, 0x08, 0x2b, 0x13, 0xec, 0xcc, 0xe4, 0x07, - 0x8a, 0xd0, 0xe0, 0xbb, 0x20, 0xef, 0xcb, 0x83, 0xe2, 0xd1, 0x34, 0x71, 0xed, 0x9b, 0x03, 0x44, - 0x16, 0x8a, 0x2b, 0x93, 0xed, 0x56, 0x31, 0x1f, 0x7d, 0x21, 0x05, 0x58, 0xfa, 0x28, 0x03, 0x0a, - 0xab, 0x21, 0x0d, 0x5c, 0x1b, 0x11, 0xea, 0x86, 0xbe, 0x41, 0x56, 0x5d, 0x2b, 0xb4, 0x9d, 0x35, - 0x52, 0x37, 0x1d, 0x33, 0x60, 0xd1, 0xba, 0x00, 0x46, 0x1c, 0x6c, 0x13, 0x19, 0x3d, 0x93, 0xd2, - 0xa7, 0x23, 0x5b, 0xd8, 0x26, 0x88, 0x73, 0x98, 0x04, 0x0b, 0x16, 0x99, 0x0b, 0x4a, 0xe2, 0xe6, - 0x91, 0x47, 0x10, 0xe7, 0xc0, 0x67, 0xc0, 0x68, 0xdd, 0xf5, 0x6d, 0x2c, 0xce, 0x71, 0x3c, 0x3e, - 0x99, 0x57, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x01, 0x4c, 0xd4, 0x08, 0x35, 0x7c, 0xd3, 0x63, 0xd0, - 0xfa, 0x08, 0x17, 0x3e, 0x23, 0x85, 0x27, 0xd6, 0x62, 0x16, 0x4a, 0xca, 0xc1, 0xcb, 0x20, 0xef, - 0xf9, 0xa6, 0xeb, 0x9b, 0xc1, 0x91, 0x9e, 0x5b, 0xd0, 0x96, 0x72, 0x95, 0x19, 0xb9, 0x26, 0xbf, - 0x23, 0xe9, 0x48, 0x49, 0xc0, 0x05, 0x90, 0xbf, 0x5e, 0xdd, 0xde, 0xda, 0xc1, 0xc1, 0x9e, 0x3e, - 0xca, 0x11, 0x46, 0x98, 0x34, 0xca, 0xdf, 0x91, 0xd4, 0xd2, 0x7b, 0x19, 0xa0, 0xa7, 0xbd, 0x12, - 0xb9, 0x14, 0xbe, 0x02, 0xf2, 0x34, 0x60, 0x15, 0xa7, 0x71, 0x24, 0x7d, 0xf2, 0x6c, 0x04, 0x56, - 0x95, 0xf4, 0xe3, 0x56, 0xf1, 0x5c, 0xbc, 0x22, 0xa2, 0x72, 0x7f, 0xa8, 0xb5, 0xf0, 0xb7, 0x1a, - 0x38, 0x73, 0x40, 0x76, 0xf7, 0x5c, 0x77, 0x7f, 0xd5, 0x32, 0x89, 0x13, 0xac, 0xba, 0x4e, 0xdd, - 0x6c, 0xc8, 0x18, 0x40, 0x8f, 0x19, 0x03, 0xaf, 0x75, 0x6b, 0xae, 0x3c, 0xd5, 0x6e, 0x15, 0xcf, - 0xf4, 0x60, 0xa0, 0x5e, 0x76, 0x94, 0xde, 0xcf, 0xa6, 0x9d, 0x90, 0x08, 0x8a, 0x77, 0x40, 0x9e, - 0x25, 0x5b, 0x0d, 0x07, 0x58, 0xa6, 0xcb, 0x73, 0x27, 0x4b, 0x4d, 0x91, 0xd9, 0x9b, 0x24, 0xc0, - 0x15, 0x28, 0xdd, 0x06, 0x62, 0x1a, 0x52, 0x5a, 0xe1, 0xf7, 0xc0, 0x08, 0xf5, 0x88, 0x21, 0xdd, - 0xf1, 0xfa, 0xe3, 0xa6, 0x44, 0x9f, 0x8d, 0x54, 0x3d, 0x62, 0xc4, 0x11, 0xcb, 0xbe, 0x10, 0x87, - 0x85, 0x1f, 0x68, 0x60, 0x94, 0xf2, 0x32, 0x22, 0x4b, 0xcf, 0x9b, 0xc3, 0xb2, 0x20, 0x55, 0xab, - 0xc4, 0x37, 0x92, 0xe0, 0xa5, 0xff, 0x64, 0xc0, 0x62, 0xbf, 0xa5, 0xab, 0xae, 0x53, 0x13, 0xc7, - 0xb1, 0x21, 0x33, 0x50, 0xc4, 0xe3, 0x0b, 0xc9, 0x0c, 0x3c, 0x6e, 0x15, 0x9f, 0x7e, 0xa4, 0x82, - 0x44, 0xaa, 0x7e, 0x4d, 0xed, 0x5b, 0xa4, 0xf3, 0x62, 0xa7, 0x61, 0xc7, 0xad, 0xe2, 0xb4, 0x5a, - 0xd6, 0x69, 0x2b, 0x6c, 0x02, 0x68, 0x61, 0x1a, 0xdc, 0xf4, 0xb1, 0x43, 0x85, 0x5a, 0xd3, 0x26, - 0xd2, 0x7d, 0xcf, 0x9e, 0x2c, 0x3c, 0xd8, 0x8a, 0xca, 0xbc, 0x84, 0x84, 0x37, 0xba, 0xb4, 0xa1, - 0x1e, 0x08, 0xac, 0xba, 0xf8, 0x04, 0x53, 0x55, 0x30, 0x12, 0x75, 0x9f, 0x51, 0x91, 0xe4, 0xc2, - 0x2f, 0x81, 0x31, 0x9b, 0x50, 0x8a, 0x1b, 0x84, 0x57, 0x89, 0xf1, 0xf8, 0x22, 0xdd, 0x14, 0x64, - 0x14, 0xf1, 0x59, 0x17, 0x71, 0xa1, 0x9f, 0xd7, 0x6e, 0x98, 0x34, 0x80, 0x6f, 0x74, 0x25, 0x40, - 0xf9, 0x64, 0x3b, 0x64, 0xab, 0x79, 0xf8, 0xab, 0x12, 0x15, 0x51, 0x12, 0xc1, 0xff, 0x5d, 0x90, - 0x33, 0x03, 0x62, 0x47, 0x37, 0xec, 0x6b, 0x43, 0x8a, 0xbd, 0xca, 0x94, 0xb4, 0x21, 0xb7, 0xc1, - 0xd0, 0x90, 0x00, 0x2d, 0xfd, 0x31, 0x03, 0x2e, 0xf6, 0x5b, 0xc2, 0xca, 0x3e, 0x65, 0x1e, 0xf7, - 0xac, 0xd0, 0xc7, 0x96, 0x8c, 0x38, 0xe5, 0xf1, 0x1d, 0x4e, 0x45, 0x92, 0xcb, 0x0a, 0x33, 0x35, - 0x9d, 0x46, 0x68, 0x61, 0x5f, 0x86, 0x93, 0xda, 0x75, 0x55, 0xd2, 0x91, 0x92, 0x80, 0x65, 0x00, - 0xe8, 0x9e, 0xeb, 0x07, 0x1c, 0x83, 0xb7, 0x46, 0xe3, 0x95, 0xd3, 0xac, 0x40, 0x54, 0x15, 0x15, - 0x25, 0x24, 0xd8, 0xbd, 0xb3, 0x6f, 0x3a, 0x35, 0x79, 0xea, 0x2a, 
0x8b, 0xbf, 0x61, 0x3a, 0x35, - 0xc4, 0x39, 0x0c, 0xdf, 0x32, 0x69, 0xc0, 0x28, 0xf2, 0xc8, 0x3b, 0xbc, 0xce, 0x25, 0x95, 0x04, - 0xc3, 0x37, 0x58, 0x6d, 0x76, 0x7d, 0x93, 0x50, 0x7d, 0x34, 0xc6, 0x5f, 0x55, 0x54, 0x94, 0x90, - 0x28, 0xfd, 0x3a, 0xdf, 0x3f, 0x48, 0x58, 0x29, 0x81, 0x97, 0x40, 0xae, 0xe1, 0xbb, 0xa1, 0x27, - 0xbd, 0xa4, 0xbc, 0xfd, 0x2a, 0x23, 0x22, 0xc1, 0x63, 0x51, 0xd9, 0xec, 0x68, 0x26, 0x55, 0x54, - 0x46, 0x2d, 0x64, 0xc4, 0x87, 0x3f, 0xd4, 0x40, 0xce, 0x91, 0xce, 0x61, 0x21, 0xf7, 0xc6, 0x90, - 0xe2, 0x82, 0xbb, 0x37, 0x36, 0x57, 0x78, 0x5e, 0x20, 0xc3, 0xe7, 0x41, 0x8e, 0x1a, 0xae, 0x47, - 0xa4, 0xd7, 0x0b, 0x91, 0x50, 0x95, 0x11, 0x8f, 0x5b, 0xc5, 0xa9, 0x48, 0x1d, 0x27, 0x20, 0x21, - 0x0c, 0x7f, 0xa4, 0x01, 0xd0, 0xc4, 0x96, 0x59, 0xc3, 0xfc, 0x62, 0xcf, 0x71, 0xf3, 0x07, 0x1b, - 0xd6, 0xb7, 0x95, 0x7a, 0x71, 0x68, 0xf1, 0x37, 0x4a, 0x40, 0xc3, 0x0f, 0x35, 0x30, 0x49, 0xc3, - 0x5d, 0x5f, 0xae, 0xa2, 0xbc, 0x05, 0x98, 0xb8, 0xf6, 0xed, 0x81, 0xda, 0x52, 0x4d, 0x00, 0x54, - 0x66, 0xda, 0xad, 0xe2, 0x64, 0x92, 0x82, 0x3a, 0x0c, 0x80, 0x3f, 0xd5, 0x40, 0x5e, 0x9e, 0x30, - 0xd5, 0xc7, 0x78, 0xc2, 0xbf, 0x35, 0xa4, 0x83, 0x95, 0x11, 0x15, 0x67, 0x81, 0x24, 0x50, 0xa4, - 0x2c, 0x80, 0xff, 0xd0, 0x80, 0x8e, 0x6b, 0xa2, 0xc0, 0x63, 0x6b, 0xc7, 0x37, 0x9d, 0x80, 0xf8, - 0xa2, 0x2b, 0xa4, 0x7a, 0x9e, 0x9b, 0x37, 0xd8, 0xbb, 0x30, 0xdd, 0x71, 0x56, 0x16, 0xa4, 0x75, - 0xfa, 0x4a, 0x1f, 0x33, 0x50, 0x5f, 0x03, 0x79, 0xa0, 0x19, 0xaa, 0xf5, 0xd2, 0xc7, 0x87, 0x10, - 0x68, 0x71, 0x67, 0x27, 0xab, 0x43, 0xdc, 0x6e, 0x27, 0xa0, 0x4b, 0x1f, 0x66, 0xd3, 0xad, 0x75, - 0xfa, 0xd2, 0x87, 0xf7, 0x84, 0xb1, 0x62, 0x2b, 0x54, 0xd7, 0xb8, 0x73, 0xdf, 0x19, 0xd2, 0xd9, - 0xab, 0x5b, 0x3b, 0x6e, 0xbc, 0x14, 0x89, 0xa2, 0x84, 0x1d, 0xf0, 0x57, 0x1a, 0x98, 0xc2, 0x86, - 0x41, 0xbc, 0x80, 0xd4, 0x44, 0x2d, 0xce, 0x7c, 0x0e, 0xe5, 0x66, 0x4e, 0x5a, 0x35, 0xb5, 0x92, - 0x84, 0x46, 0x9d, 0x96, 0xc0, 0x97, 0xc1, 0x69, 0x1a, 0xb8, 0x3e, 0xa9, 0x45, 0x91, 0x2b, 0xef, - 0x09, 0xd8, 0x6e, 0x15, 0x4f, 0x57, 0x3b, 0x38, 0x28, 0x25, 0x59, 0xfa, 0x74, 0x04, 0x14, 0x1f, - 0x91, 0x19, 0x27, 0x78, 0xed, 0x3c, 0x03, 0x46, 0xf9, 0x76, 0x6b, 0xdc, 0x2b, 0xf9, 0x44, 0xe7, - 0xc6, 0xa9, 0x48, 0x72, 0x59, 0x5d, 0x67, 0xf8, 0xac, 0xdb, 0xc8, 0x72, 0x41, 0x55, 0xd7, 0xab, - 0x82, 0x8c, 0x22, 0x3e, 0x7c, 0x17, 0x8c, 0x8a, 0x69, 0x06, 0x2f, 0xaa, 0x43, 0x2c, 0x8c, 0x80, - 0xdb, 0xc9, 0xa1, 0x90, 0x84, 0xec, 0x2e, 0x88, 0xb9, 0x27, 0x5d, 0x10, 0x1f, 0x5a, 0x81, 0x46, - 0xff, 0xcf, 0x2b, 0x50, 0xe9, 0xbf, 0x5a, 0x3a, 0xef, 0x13, 0x5b, 0xad, 0x1a, 0xd8, 0x22, 0x70, - 0x0d, 0xcc, 0xb0, 0x47, 0x06, 0x22, 0x9e, 0x65, 0x1a, 0x98, 0xf2, 0x97, 0xa8, 0x08, 0x38, 0x35, - 0x1c, 0xa9, 0xa6, 0xf8, 0xa8, 0x6b, 0x05, 0xbc, 0x0e, 0xa0, 0x68, 0xbc, 0x3b, 0xf4, 0x88, 0x1e, - 0x42, 0xb5, 0xd0, 0xd5, 0x2e, 0x09, 0xd4, 0x63, 0x15, 0x5c, 0x05, 0xb3, 0x16, 0xde, 0x25, 0x56, - 0x95, 0x58, 0xc4, 0x08, 0x5c, 0x9f, 0xab, 0x12, 0x6f, 0xf5, 0xb9, 0x76, 0xab, 0x38, 0x7b, 0x23, - 0xcd, 0x44, 0xdd, 0xf2, 0xa5, 0xc5, 0x74, 0x7a, 0x25, 0x37, 0x2e, 0x9e, 0x33, 0xbf, 0xcb, 0x80, - 0xf9, 0xfe, 0x91, 0x01, 0xdf, 0x8b, 0x5f, 0x5d, 0xa2, 0xa9, 0x7e, 0x6b, 0x58, 0x51, 0x28, 0x9f, - 0x5d, 0xa0, 0xfb, 0xc9, 0x05, 0xbf, 0xcf, 0x3a, 0x1c, 0x6c, 0x45, 0xd3, 0x98, 0x37, 0x87, 0x66, - 0x02, 0x03, 0xa9, 0x8c, 0x8b, 0xe6, 0x09, 0x5b, 0xbc, 0x57, 0xc2, 0x16, 0x29, 0xfd, 0x49, 0x4b, - 0x3f, 0xbc, 0xe3, 0x0c, 0x86, 0x3f, 0xd3, 0xc0, 0xb4, 0xeb, 0x11, 0x67, 0x65, 0x67, 0xe3, 0xf6, - 0x57, 0x44, 0x26, 0x4b, 0x57, 0x6d, 0x3d, 0xa6, 0x9d, 0xd7, 0xab, 0xdb, 0x5b, 0x42, 0xe1, 
0x8e, - 0xef, 0x7a, 0xb4, 0x72, 0xa6, 0xdd, 0x2a, 0x4e, 0x6f, 0x77, 0x42, 0xa1, 0x34, 0x76, 0xc9, 0x06, - 0x73, 0xeb, 0x87, 0x01, 0xf1, 0x1d, 0x6c, 0xad, 0xb9, 0x46, 0x68, 0x13, 0x27, 0x10, 0x86, 0xa6, - 0x46, 0x39, 0xda, 0x09, 0x47, 0x39, 0x17, 0x41, 0x36, 0xf4, 0x2d, 0x19, 0xc5, 0x13, 0x6a, 0x54, - 0x89, 0x6e, 0x20, 0x46, 0x2f, 0x2d, 0x82, 0x11, 0x66, 0x27, 0x3c, 0x0f, 0xb2, 0x3e, 0x3e, 0xe0, - 0x5a, 0x27, 0x2b, 0x63, 0x4c, 0x04, 0xe1, 0x03, 0xc4, 0x68, 0xa5, 0x3f, 0x5f, 0x00, 0xd3, 0xa9, - 0xbd, 0xc0, 0x79, 0x90, 0x51, 0xf3, 0x4f, 0x20, 0x95, 0x66, 0x36, 0xd6, 0x50, 0xc6, 0xac, 0xc1, - 0x17, 0x55, 0xf1, 0x15, 0xa0, 0x45, 0x55, 0xcf, 0x39, 0x95, 0xb5, 0xb4, 0xb1, 0x3a, 0x66, 0x48, - 0x54, 0x38, 0x99, 0x0d, 0xa4, 0x2e, 0xb3, 0x44, 0xd8, 0x40, 0xea, 0x88, 0xd1, 0x3e, 0xeb, 0x1c, - 0x2b, 0x1a, 0xa4, 0xe5, 0x4e, 0x30, 0x48, 0x1b, 0x7d, 0xe8, 0x20, 0xed, 0x12, 0xc8, 0x05, 0x66, - 0x60, 0x11, 0x7d, 0xac, 0xf3, 0xe5, 0x71, 0x93, 0x11, 0x91, 0xe0, 0xc1, 0x3b, 0x60, 0xac, 0x46, - 0xea, 0x38, 0xb4, 0x02, 0x3d, 0xcf, 0x43, 0x68, 0x75, 0x00, 0x21, 0x24, 0xa6, 0x9c, 0x6b, 0x42, - 0x2f, 0x8a, 0x00, 0xe0, 0xd3, 0x60, 0xcc, 0xc6, 0x87, 0xa6, 0x1d, 0xda, 0xbc, 0x27, 0xd3, 0x84, - 0xd8, 0xa6, 0x20, 0xa1, 0x88, 0xc7, 0x2a, 0x23, 0x39, 0x34, 0xac, 0x90, 0x9a, 0x4d, 0x22, 0x99, - 0x3a, 0xe0, 0xb7, 0xa7, 0xaa, 0x8c, 0xeb, 0x29, 0x3e, 0xea, 0x5a, 0xc1, 0xc1, 0x4c, 0x87, 0x2f, - 0x9e, 0x48, 0x80, 0x09, 0x12, 0x8a, 0x78, 0x9d, 0x60, 0x52, 0x7e, 0xb2, 0x1f, 0x98, 0x5c, 0xdc, - 0xb5, 0x02, 0x7e, 0x19, 0x8c, 0xdb, 0xf8, 0xf0, 0x06, 0x71, 0x1a, 0xc1, 0x9e, 0x3e, 0xb5, 0xa0, - 0x2d, 0x65, 0x2b, 0x53, 0xed, 0x56, 0x71, 0x7c, 0x33, 0x22, 0xa2, 0x98, 0xcf, 0x85, 0x4d, 0x47, - 0x0a, 0x9f, 0x4e, 0x08, 0x47, 0x44, 0x14, 0xf3, 0x59, 0x07, 0xe1, 0xe1, 0x80, 0x25, 0x97, 0x3e, - 0xdd, 0xf9, 0x32, 0xdc, 0x11, 0x64, 0x14, 0xf1, 0xe1, 0x12, 0xc8, 0xdb, 0xf8, 0x90, 0xbf, 0xe2, - 0xf5, 0x19, 0xae, 0x96, 0x4f, 0x7c, 0x37, 0x25, 0x0d, 0x29, 0x2e, 0x97, 0x34, 0x1d, 0x21, 0x39, - 0x9b, 0x90, 0x94, 0x34, 0xa4, 0xb8, 0x2c, 0x88, 0x43, 0xc7, 0xbc, 0x1b, 0x12, 0x21, 0x0c, 0xb9, - 0x67, 0x54, 0x10, 0xdf, 0x8a, 0x59, 0x28, 0x29, 0xc7, 0x5e, 0xd1, 0x76, 0x68, 0x05, 0xa6, 0x67, - 0x91, 0xed, 0xba, 0x7e, 0x86, 0xfb, 0x9f, 0xf7, 0xc9, 0x9b, 0x8a, 0x8a, 0x12, 0x12, 0x90, 0x80, - 0x11, 0xe2, 0x84, 0xb6, 0x7e, 0x96, 0x5f, 0xec, 0x03, 0x09, 0x41, 0x95, 0x39, 0xeb, 0x4e, 0x68, - 0x23, 0xae, 0x1e, 0xbe, 0x08, 0xa6, 0x6c, 0x7c, 0xc8, 0xca, 0x01, 0xf1, 0x03, 0xf6, 0xbe, 0x9f, - 0xe3, 0x9b, 0x9f, 0x65, 0x1d, 0xe7, 0x66, 0x92, 0x81, 0x3a, 0xe5, 0xf8, 0x42, 0xd3, 0x49, 0x2c, - 0x3c, 0x97, 0x58, 0x98, 0x64, 0xa0, 0x4e, 0x39, 0xe6, 0x69, 0x9f, 0xdc, 0x0d, 0x4d, 0x9f, 0xd4, - 0xf4, 0xa7, 0x78, 0x93, 0x2a, 0xa7, 0xf0, 0x82, 0x86, 0x14, 0x17, 0x36, 0xa3, 0x71, 0x8f, 0xce, - 0xd3, 0xf0, 0xd6, 0x60, 0x2b, 0xf9, 0xb6, 0xbf, 0xe2, 0xfb, 0xf8, 0x48, 0xdc, 0x34, 0xc9, 0x41, - 0x0f, 0xa4, 0x20, 0x87, 0x2d, 0x6b, 0xbb, 0xae, 0x9f, 0xe7, 0xbe, 0x1f, 0xf4, 0x0d, 0xa2, 0xaa, - 0xce, 0x0a, 0x03, 0x41, 0x02, 0x8b, 0x81, 0xba, 0x0e, 0x0b, 0x8d, 0xf9, 0xe1, 0x82, 0x6e, 0x33, - 0x10, 0x24, 0xb0, 0xf8, 0x4e, 0x9d, 0xa3, 0xed, 0xba, 0xfe, 0x85, 0x21, 0xef, 0x94, 0x81, 0x20, - 0x81, 0x05, 0x4d, 0x90, 0x75, 0xdc, 0x40, 0xbf, 0x30, 0x94, 0xeb, 0x99, 0x5f, 0x38, 0x5b, 0x6e, - 0x80, 0x18, 0x06, 0xfc, 0xa5, 0x06, 0x80, 0x17, 0x87, 0xe8, 0xc5, 0x81, 0x4c, 0x11, 0x52, 0x90, - 0xe5, 0x38, 0xb6, 0xd7, 0x9d, 0xc0, 0x3f, 0x8a, 0xdf, 0x91, 0x89, 0x1c, 0x48, 0x58, 0x01, 0x7f, - 0xaf, 0x81, 0xb3, 0xc9, 0x36, 0x59, 0x99, 0x57, 0xe0, 0x1e, 0xb9, 0x39, 0xe8, 0x30, 0xaf, 0xb8, - 0xae, 0x55, 0xd1, 
0xdb, 0xad, 0xe2, 0xd9, 0x95, 0x1e, 0xa8, 0xa8, 0xa7, 0x2d, 0xf0, 0x2f, 0x1a, - 0x98, 0x95, 0x55, 0x34, 0x61, 0x61, 0x91, 0x3b, 0x90, 0x0c, 0xda, 0x81, 0x69, 0x1c, 0xe1, 0x47, - 0xf5, 0xeb, 0x71, 0x17, 0x1f, 0x75, 0x9b, 0x06, 0xff, 0xae, 0x81, 0xc9, 0x1a, 0xf1, 0x88, 0x53, - 0x23, 0x8e, 0xc1, 0x6c, 0x5d, 0x18, 0xc8, 0xd8, 0x20, 0x6d, 0xeb, 0x5a, 0x02, 0x42, 0x98, 0x59, - 0x96, 0x66, 0x4e, 0x26, 0x59, 0xc7, 0xad, 0xe2, 0xb9, 0x78, 0x69, 0x92, 0x83, 0x3a, 0xac, 0x84, - 0x1f, 0x69, 0x60, 0x3a, 0x3e, 0x00, 0x71, 0xa5, 0x2c, 0x0e, 0x31, 0x0e, 0x78, 0xfb, 0xba, 0xd2, - 0x09, 0x88, 0xd2, 0x16, 0xc0, 0xbf, 0x6a, 0xac, 0x53, 0x8b, 0xde, 0x7d, 0x54, 0x2f, 0x71, 0x5f, - 0xbe, 0x3d, 0x70, 0x5f, 0x2a, 0x04, 0xe1, 0xca, 0xcb, 0x71, 0x2b, 0xa8, 0x38, 0xc7, 0xad, 0xe2, - 0x5c, 0xd2, 0x93, 0x8a, 0x81, 0x92, 0x16, 0xc2, 0x9f, 0x68, 0x60, 0x92, 0xc4, 0x1d, 0x37, 0xd5, - 0x2f, 0x0d, 0xc4, 0x89, 0x3d, 0x9b, 0x78, 0xf1, 0x52, 0x4f, 0xb0, 0x28, 0xea, 0xc0, 0x66, 0x1d, - 0x24, 0x39, 0xc4, 0xb6, 0x67, 0x11, 0xfd, 0x8b, 0x03, 0xee, 0x20, 0xd7, 0x85, 0x5e, 0x14, 0x01, - 0xcc, 0xb3, 0x97, 0x4f, 0x2a, 0x73, 0xe0, 0x0c, 0xc8, 0xee, 0x13, 0xf9, 0x33, 0x2c, 0x62, 0x7f, - 0xc2, 0x1a, 0xc8, 0x35, 0xb1, 0x15, 0x46, 0x8f, 0xb7, 0x01, 0x57, 0x5d, 0x24, 0x94, 0xbf, 0x9c, - 0x79, 0x49, 0x9b, 0xbf, 0xa7, 0x81, 0x73, 0xbd, 0x13, 0xfa, 0x89, 0x9a, 0xf5, 0x1b, 0x0d, 0xcc, - 0x76, 0xe5, 0x6e, 0x0f, 0x8b, 0xee, 0x76, 0x5a, 0xf4, 0xfa, 0xa0, 0x93, 0xb0, 0x1a, 0xf8, 0xa6, - 0xd3, 0xe0, 0x9d, 0x47, 0xd2, 0xbc, 0x9f, 0x6b, 0x60, 0x26, 0x9d, 0x0e, 0x4f, 0xd2, 0x5f, 0xa5, - 0x7b, 0x19, 0x70, 0xae, 0x77, 0xc3, 0x04, 0x7d, 0xf5, 0x32, 0x1c, 0xce, 0x0b, 0xbb, 0xd7, 0x34, - 0xee, 0x03, 0x0d, 0x4c, 0xdc, 0x51, 0x72, 0xd1, 0x0f, 0x80, 0x03, 0x7f, 0xdb, 0x47, 0xf5, 0x27, - 0x66, 0x50, 0x94, 0xc4, 0x2d, 0xfd, 0x4d, 0x03, 0x73, 0x3d, 0x0b, 0x2b, 0x7b, 0x82, 0x62, 0xcb, - 0x72, 0x0f, 0xc4, 0x88, 0x26, 0x31, 0xff, 0x5c, 0xe1, 0x54, 0x24, 0xb9, 0x09, 0xef, 0x65, 0x3e, - 0x2f, 0xef, 0x95, 0xfe, 0xa9, 0x81, 0x0b, 0x0f, 0x8b, 0xc4, 0x27, 0x72, 0xa4, 0x4b, 0x20, 0x2f, - 0x9b, 0xa2, 0x23, 0x7e, 0x9c, 0xf2, 0x1d, 0x20, 0x8b, 0x06, 0xff, 0xcf, 0x14, 0xf1, 0x57, 0xe9, - 0x7d, 0x0d, 0xcc, 0x54, 0x89, 0xdf, 0x34, 0x0d, 0x82, 0x48, 0x9d, 0xf8, 0xc4, 0x31, 0x08, 0x5c, - 0x06, 0xe3, 0xfc, 0x97, 0x37, 0x0f, 0x1b, 0xd1, 0x58, 0x7a, 0x56, 0xba, 0x7c, 0x7c, 0x2b, 0x62, - 0xa0, 0x58, 0x46, 0x8d, 0xb0, 0x33, 0x7d, 0x47, 0xd8, 0x17, 0xc0, 0x88, 0x17, 0x0f, 0xf8, 0xf2, - 0x8c, 0xcb, 0x67, 0x7a, 0x9c, 0x5a, 0xfa, 0x97, 0x06, 0x7a, 0xfd, 0x97, 0x08, 0x6c, 0x82, 0x31, - 0x2a, 0x8c, 0x93, 0xce, 0xdb, 0x7e, 0x4c, 0xe7, 0xa5, 0xb7, 0x2a, 0x0a, 0x7f, 0x44, 0x8d, 0xc0, - 0x98, 0xff, 0x0c, 0x5c, 0x09, 0x9d, 0x9a, 0x1c, 0xc9, 0x4d, 0x0a, 0xff, 0xad, 0xae, 0x08, 0x1a, - 0x52, 0x5c, 0x78, 0x5e, 0x0c, 0x8f, 0x12, 0x13, 0x99, 0x68, 0x70, 0x54, 0xb9, 0x72, 0xff, 0x41, - 0xe1, 0xd4, 0xc7, 0x0f, 0x0a, 0xa7, 0x3e, 0x79, 0x50, 0x38, 0xf5, 0x83, 0x76, 0x41, 0xbb, 0xdf, - 0x2e, 0x68, 0x1f, 0xb7, 0x0b, 0xda, 0x27, 0xed, 0x82, 0xf6, 0xef, 0x76, 0x41, 0xfb, 0xc5, 0xa7, - 0x85, 0x53, 0xdf, 0x19, 0x93, 0xa6, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xc5, 0xe4, 0x3a, - 0xbb, 0x29, 0x00, 0x00, + // 2783 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xdf, 0x6f, 0x23, 0x57, + 0xf5, 0xdf, 0xb1, 0xe3, 0xc4, 0xb9, 0x49, 0x36, 0xc9, 0xdd, 0x66, 0x3b, 0x9b, 0x6f, 0x6a, 0x27, + 0xee, 0xb7, 0x55, 0x28, 0xbb, 0x4e, 0xbb, 0xb4, 0xb4, 0x54, 0xe2, 0x21, 0x76, 0xd2, 0x2a, 0x65, + 0xf3, 0x83, 0xeb, 0xdd, 0xb6, 0xd0, 0x9f, 0x37, 0xe3, 0x6b, 0x67, 
0x36, 0xf3, 0x6b, 0xe7, 0xce, + 0x38, 0x89, 0x0a, 0x08, 0xa8, 0x2a, 0x10, 0x02, 0x8a, 0xe8, 0xbe, 0x20, 0xe0, 0x01, 0x10, 0x2f, + 0x3c, 0xc0, 0x03, 0xbc, 0xc1, 0x1f, 0xb0, 0x8f, 0x15, 0x4f, 0x15, 0x42, 0x16, 0xeb, 0xfe, 0x0b, + 0x48, 0x48, 0x79, 0x42, 0xf7, 0xc7, 0xdc, 0x19, 0x8f, 0xed, 0xdd, 0xa8, 0x6b, 0x77, 0x79, 0xcb, + 0x9c, 0x73, 0xee, 0xf9, 0x9c, 0x7b, 0xee, 0x39, 0xe7, 0x9e, 0x7b, 0x1c, 0xd0, 0x38, 0x7c, 0x81, + 0x96, 0x4d, 0x77, 0xed, 0x30, 0xdc, 0x27, 0xbe, 0x43, 0x02, 0x42, 0xd7, 0x5a, 0xc4, 0xa9, 0xbb, + 0xfe, 0x9a, 0x64, 0x60, 0xcf, 0x24, 0xc7, 0x01, 0x71, 0xa8, 0xe9, 0x3a, 0xf4, 0x0a, 0xf6, 0x4c, + 0x4a, 0xfc, 0x16, 0xf1, 0xd7, 0xbc, 0xc3, 0x26, 0xe3, 0xd1, 0x6e, 0x81, 0xb5, 0xd6, 0x33, 0xfb, + 0x24, 0xc0, 0xcf, 0xac, 0x35, 0x89, 0x43, 0x7c, 0x1c, 0x90, 0x7a, 0xd9, 0xf3, 0xdd, 0xc0, 0x85, + 0x5f, 0x15, 0xea, 0xca, 0x5d, 0xd2, 0xef, 0x28, 0x75, 0x65, 0xef, 0xb0, 0xc9, 0x78, 0xb4, 0x5b, + 0xa0, 0x2c, 0xd5, 0x2d, 0x5e, 0x69, 0x9a, 0xc1, 0x41, 0xb8, 0x5f, 0x36, 0x5c, 0x7b, 0xad, 0xe9, + 0x36, 0xdd, 0x35, 0xae, 0x75, 0x3f, 0x6c, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x12, 0x68, 0x8b, 0xcf, + 0xc6, 0xc6, 0xdb, 0xd8, 0x38, 0x30, 0x1d, 0xe2, 0x9f, 0xc4, 0x16, 0xdb, 0x24, 0xc0, 0x6b, 0xad, + 0x1e, 0x1b, 0x17, 0xd7, 0x06, 0xad, 0xf2, 0x43, 0x27, 0x30, 0x6d, 0xd2, 0xb3, 0xe0, 0xcb, 0xf7, + 0x5b, 0x40, 0x8d, 0x03, 0x62, 0xe3, 0xf4, 0xba, 0xd2, 0xa9, 0x06, 0xe6, 0xab, 0xae, 0xd3, 0x22, + 0x3e, 0xdb, 0x25, 0x22, 0xb7, 0x42, 0x42, 0x03, 0x58, 0x01, 0xd9, 0xd0, 0xac, 0xeb, 0xda, 0xb2, + 0xb6, 0x3a, 0x59, 0x79, 0xfa, 0x4e, 0xbb, 0x78, 0xae, 0xd3, 0x2e, 0x66, 0x6f, 0x6c, 0x6d, 0x9c, + 0xb6, 0x8b, 0x2b, 0x83, 0x90, 0x82, 0x13, 0x8f, 0xd0, 0xf2, 0x8d, 0xad, 0x0d, 0xc4, 0x16, 0xc3, + 0x97, 0xc1, 0x7c, 0x9d, 0x50, 0xd3, 0x27, 0xf5, 0xf5, 0xbd, 0xad, 0x57, 0x85, 0x7e, 0x3d, 0xc3, + 0x35, 0x5e, 0x92, 0x1a, 0xe7, 0x37, 0xd2, 0x02, 0xa8, 0x77, 0x0d, 0x7c, 0x1d, 0x4c, 0xb8, 0xfb, + 0x37, 0x89, 0x11, 0x50, 0x3d, 0xbb, 0x9c, 0x5d, 0x9d, 0xba, 0x7a, 0xa5, 0x1c, 0x9f, 0xa0, 0x32, + 0x81, 0x1f, 0x9b, 0xdc, 0x6c, 0x19, 0xe1, 0xa3, 0xcd, 0xe8, 0xe4, 0x2a, 0xb3, 0x12, 0x6d, 0x62, + 0x57, 0x68, 0x41, 0x91, 0xba, 0xd2, 0xef, 0x32, 0x00, 0x26, 0x37, 0x4f, 0x3d, 0xd7, 0xa1, 0x64, + 0x28, 0xbb, 0xa7, 0x60, 0xce, 0xe0, 0x9a, 0x03, 0x52, 0x97, 0xb8, 0x7a, 0xe6, 0xb3, 0x58, 0xaf, + 0x4b, 0xfc, 0xb9, 0x6a, 0x4a, 0x1d, 0xea, 0x01, 0x80, 0xd7, 0xc1, 0xb8, 0x4f, 0x68, 0x68, 0x05, + 0x7a, 0x76, 0x59, 0x5b, 0x9d, 0xba, 0x7a, 0x79, 0x20, 0x14, 0x8f, 0x6f, 0x16, 0x7c, 0xe5, 0xd6, + 0x33, 0xe5, 0x5a, 0x80, 0x83, 0x90, 0x56, 0xce, 0x4b, 0xa4, 0x71, 0xc4, 0x75, 0x20, 0xa9, 0xab, + 0xf4, 0xc3, 0x0c, 0x98, 0x4b, 0x7a, 0xa9, 0x65, 0x92, 0x23, 0x78, 0x04, 0x26, 0x7c, 0x11, 0x2c, + 0xdc, 0x4f, 0x53, 0x57, 0xf7, 0xca, 0x0f, 0x94, 0x56, 0xe5, 0x9e, 0x20, 0xac, 0x4c, 0xb1, 0x33, + 0x93, 0x1f, 0x28, 0x42, 0x83, 0xef, 0x81, 0xbc, 0x2f, 0x0f, 0x8a, 0x47, 0xd3, 0xd4, 0xd5, 0xaf, + 0x0f, 0x11, 0x59, 0x28, 0xae, 0x4c, 0x77, 0xda, 0xc5, 0x7c, 0xf4, 0x85, 0x14, 0x60, 0xe9, 0xa3, + 0x0c, 0x28, 0x54, 0x43, 0x1a, 0xb8, 0x36, 0x22, 0xd4, 0x0d, 0x7d, 0x83, 0x54, 0x5d, 0x2b, 0xb4, + 0x9d, 0x0d, 0xd2, 0x30, 0x1d, 0x33, 0x60, 0xd1, 0xba, 0x0c, 0xc6, 0x1c, 0x6c, 0x13, 0x19, 0x3d, + 0xd3, 0xd2, 0xa7, 0x63, 0x3b, 0xd8, 0x26, 0x88, 0x73, 0x98, 0x04, 0x0b, 0x16, 0x99, 0x0b, 0x4a, + 0xe2, 0xfa, 0x89, 0x47, 0x10, 0xe7, 0xc0, 0x27, 0xc1, 0x78, 0xc3, 0xf5, 0x6d, 0x2c, 0xce, 0x71, + 0x32, 0x3e, 0x99, 0x97, 0x38, 0x15, 0x49, 0x2e, 0x7c, 0x0e, 0x4c, 0xd5, 0x09, 0x35, 0x7c, 0xd3, + 0x63, 0xd0, 0xfa, 0x18, 0x17, 0xbe, 0x20, 0x85, 0xa7, 0x36, 0x62, 0x16, 0x4a, 0xca, 0xc1, 
0xcb, + 0x20, 0xef, 0xf9, 0xa6, 0xeb, 0x9b, 0xc1, 0x89, 0x9e, 0x5b, 0xd6, 0x56, 0x73, 0x95, 0x39, 0xb9, + 0x26, 0xbf, 0x27, 0xe9, 0x48, 0x49, 0xc0, 0x65, 0x90, 0x7f, 0xa5, 0xb6, 0xbb, 0xb3, 0x87, 0x83, + 0x03, 0x7d, 0x9c, 0x23, 0x8c, 0x31, 0x69, 0x94, 0xbf, 0x29, 0xa9, 0xa5, 0x7f, 0x66, 0x80, 0x9e, + 0xf6, 0x4a, 0xe4, 0x52, 0xf8, 0x12, 0xc8, 0xd3, 0x80, 0x55, 0x9c, 0xe6, 0x89, 0xf4, 0xc9, 0x53, + 0x11, 0x58, 0x4d, 0xd2, 0x4f, 0xdb, 0xc5, 0x8b, 0xf1, 0x8a, 0x88, 0xca, 0xfd, 0xa1, 0xd6, 0xc2, + 0x5f, 0x6b, 0xe0, 0xc2, 0x11, 0xd9, 0x3f, 0x70, 0xdd, 0xc3, 0xaa, 0x65, 0x12, 0x27, 0xa8, 0xba, + 0x4e, 0xc3, 0x6c, 0xca, 0x18, 0x40, 0x0f, 0x18, 0x03, 0xaf, 0xf5, 0x6a, 0xae, 0x3c, 0xda, 0x69, + 0x17, 0x2f, 0xf4, 0x61, 0xa0, 0x7e, 0x76, 0xc0, 0xd7, 0x81, 0x6e, 0xa4, 0x92, 0x44, 0x16, 0x30, + 0x51, 0xb6, 0x26, 0x2b, 0x4b, 0x9d, 0x76, 0x51, 0xaf, 0x0e, 0x90, 0x41, 0x03, 0x57, 0x97, 0xde, + 0xcf, 0xa6, 0xdd, 0x9b, 0x08, 0xb7, 0x77, 0x41, 0x9e, 0xa5, 0x71, 0x1d, 0x07, 0x58, 0x26, 0xe2, + 0xd3, 0x67, 0x4b, 0x7a, 0x51, 0x33, 0xb6, 0x49, 0x80, 0x2b, 0x50, 0x1e, 0x08, 0x88, 0x69, 0x48, + 0x69, 0x85, 0xdf, 0x06, 0x63, 0xd4, 0x23, 0x86, 0x74, 0xf4, 0x1b, 0x0f, 0x9a, 0x6c, 0x03, 0x36, + 0x52, 0xf3, 0x88, 0x11, 0xe7, 0x02, 0xfb, 0x42, 0x1c, 0x16, 0x7e, 0xa0, 0x81, 0x71, 0xca, 0x0b, + 0x94, 0x2c, 0x6a, 0x6f, 0x8d, 0xca, 0x82, 0x54, 0x15, 0x14, 0xdf, 0x48, 0x82, 0x97, 0xfe, 0x9d, + 0x01, 0x2b, 0x83, 0x96, 0x56, 0x5d, 0xa7, 0x2e, 0x8e, 0x63, 0x4b, 0xe6, 0xb6, 0x88, 0xf4, 0xe7, + 0x92, 0xb9, 0x7d, 0xda, 0x2e, 0x3e, 0x71, 0x5f, 0x05, 0x89, 0x22, 0xf0, 0x15, 0xb5, 0x6f, 0x51, + 0x28, 0x56, 0xba, 0x0d, 0x3b, 0x6d, 0x17, 0x67, 0xd5, 0xb2, 0x6e, 0x5b, 0x61, 0x0b, 0x40, 0x0b, + 0xd3, 0xe0, 0xba, 0x8f, 0x1d, 0x2a, 0xd4, 0x9a, 0x36, 0x91, 0xee, 0x7b, 0xea, 0x6c, 0xe1, 0xc1, + 0x56, 0x54, 0x16, 0x25, 0x24, 0xbc, 0xd6, 0xa3, 0x0d, 0xf5, 0x41, 0x60, 0x75, 0xcb, 0x27, 0x98, + 0xaa, 0x52, 0x94, 0xb8, 0x51, 0x18, 0x15, 0x49, 0x2e, 0xfc, 0x02, 0x98, 0xb0, 0x09, 0xa5, 0xb8, + 0x49, 0x78, 0xfd, 0x99, 0x8c, 0xaf, 0xe8, 0x6d, 0x41, 0x46, 0x11, 0x9f, 0xf5, 0x27, 0x4b, 0x83, + 0xbc, 0x76, 0xcd, 0xa4, 0x01, 0x7c, 0xb3, 0x27, 0x01, 0xca, 0x67, 0xdb, 0x21, 0x5b, 0xcd, 0xc3, + 0x5f, 0x15, 0xbf, 0x88, 0x92, 0x08, 0xfe, 0x6f, 0x81, 0x9c, 0x19, 0x10, 0x3b, 0xba, 0xbb, 0x5f, + 0x1b, 0x51, 0xec, 0x55, 0x66, 0xa4, 0x0d, 0xb9, 0x2d, 0x86, 0x86, 0x04, 0x68, 0xe9, 0xf7, 0x19, + 0xf0, 0xd8, 0xa0, 0x25, 0xec, 0x42, 0xa1, 0xcc, 0xe3, 0x9e, 0x15, 0xfa, 0xd8, 0x92, 0x11, 0xa7, + 0x3c, 0xbe, 0xc7, 0xa9, 0x48, 0x72, 0x59, 0xc9, 0xa7, 0xa6, 0xd3, 0x0c, 0x2d, 0xec, 0xcb, 0x70, + 0x52, 0xbb, 0xae, 0x49, 0x3a, 0x52, 0x12, 0xb0, 0x0c, 0x00, 0x3d, 0x70, 0xfd, 0x80, 0x63, 0xc8, + 0xea, 0x75, 0x9e, 0x15, 0x88, 0x9a, 0xa2, 0xa2, 0x84, 0x04, 0xbb, 0xd1, 0x0e, 0x4d, 0xa7, 0x2e, + 0x4f, 0x5d, 0x65, 0xf1, 0xd7, 0x4c, 0xa7, 0x8e, 0x38, 0x87, 0xe1, 0x5b, 0x26, 0x0d, 0x18, 0x45, + 0x1e, 0x79, 0x97, 0xd7, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x06, 0xab, 0xfa, 0xae, 0x6f, 0x12, 0xaa, + 0x8f, 0xc7, 0xf8, 0x55, 0x45, 0x45, 0x09, 0x89, 0xd2, 0x2f, 0xf3, 0x83, 0x83, 0x84, 0x95, 0x12, + 0xf8, 0x38, 0xc8, 0x35, 0x7d, 0x37, 0xf4, 0xa4, 0x97, 0x94, 0xb7, 0x5f, 0x66, 0x44, 0x24, 0x78, + 0x2c, 0x2a, 0x5b, 0x5d, 0x6d, 0xaa, 0x8a, 0xca, 0xa8, 0x39, 0x8d, 0xf8, 0xf0, 0x7b, 0x1a, 0xc8, + 0x39, 0xd2, 0x39, 0x2c, 0xe4, 0xde, 0x1c, 0x51, 0x5c, 0x70, 0xf7, 0xc6, 0xe6, 0x0a, 0xcf, 0x0b, + 0x64, 0xf8, 0x2c, 0xc8, 0x51, 0xc3, 0xf5, 0x88, 0xf4, 0x7a, 0x21, 0x12, 0xaa, 0x31, 0xe2, 0x69, + 0xbb, 0x38, 0x13, 0xa9, 0xe3, 0x04, 0x24, 0x84, 0xe1, 0x0f, 0x34, 0x00, 0x5a, 0xd8, 0x32, 0xeb, + 0x98, 0xb7, 0x0c, 
0x39, 0x6e, 0xfe, 0x70, 0xc3, 0xfa, 0x55, 0xa5, 0x5e, 0x1c, 0x5a, 0xfc, 0x8d, + 0x12, 0xd0, 0xf0, 0x43, 0x0d, 0x4c, 0xd3, 0x70, 0xdf, 0x97, 0xab, 0x28, 0x6f, 0x2e, 0xa6, 0xae, + 0x7e, 0x63, 0xa8, 0xb6, 0xd4, 0x12, 0x00, 0x95, 0xb9, 0x4e, 0xbb, 0x38, 0x9d, 0xa4, 0xa0, 0x2e, + 0x03, 0xe0, 0x8f, 0x35, 0x90, 0x6f, 0x45, 0x77, 0xf6, 0x04, 0x4f, 0xf8, 0xb7, 0x47, 0x74, 0xb0, + 0x32, 0xa2, 0xe2, 0x2c, 0x50, 0x7d, 0x80, 0xb2, 0x00, 0xfe, 0x55, 0x03, 0x3a, 0xae, 0x8b, 0x02, + 0x8f, 0xad, 0x3d, 0xdf, 0x74, 0x02, 0xe2, 0x8b, 0x7e, 0x93, 0xea, 0x79, 0x6e, 0xde, 0x70, 0xef, + 0xc2, 0x74, 0x2f, 0x5b, 0x59, 0x96, 0xd6, 0xe9, 0xeb, 0x03, 0xcc, 0x40, 0x03, 0x0d, 0xe4, 0x81, + 0x16, 0xb7, 0x34, 0xfa, 0xe4, 0x08, 0x02, 0x2d, 0xee, 0xa5, 0x64, 0x75, 0x88, 0x3b, 0xa8, 0x04, + 0x74, 0xe9, 0xc3, 0x6c, 0xba, 0x69, 0x4f, 0x5f, 0xfa, 0xf0, 0xb6, 0x30, 0x56, 0x6c, 0x85, 0xea, + 0x1a, 0x77, 0xee, 0xbb, 0x23, 0x3a, 0x7b, 0x75, 0x6b, 0xc7, 0x8d, 0x97, 0x22, 0x51, 0x94, 0xb0, + 0x03, 0xfe, 0x42, 0x03, 0x33, 0xd8, 0x30, 0x88, 0x17, 0x90, 0xba, 0xa8, 0xc5, 0x99, 0xcf, 0xa1, + 0xdc, 0x2c, 0x48, 0xab, 0x66, 0xd6, 0x93, 0xd0, 0xa8, 0xdb, 0x12, 0xf8, 0x22, 0x38, 0x4f, 0x03, + 0xd7, 0x27, 0xf5, 0x54, 0x97, 0x0b, 0x3b, 0xed, 0xe2, 0xf9, 0x5a, 0x17, 0x07, 0xa5, 0x24, 0x4b, + 0x9f, 0x8e, 0x81, 0xe2, 0x7d, 0x32, 0xe3, 0x0c, 0xef, 0xa8, 0x27, 0xc1, 0x38, 0xdf, 0x6e, 0x9d, + 0x7b, 0x25, 0x9f, 0xe8, 0xdc, 0x38, 0x15, 0x49, 0x2e, 0xab, 0xeb, 0x0c, 0x9f, 0x75, 0x1b, 0x59, + 0x2e, 0xa8, 0xea, 0x7a, 0x4d, 0x90, 0x51, 0xc4, 0x87, 0xef, 0x81, 0x71, 0x31, 0x27, 0xe1, 0x45, + 0x75, 0x84, 0x85, 0x11, 0x70, 0x3b, 0x39, 0x14, 0x92, 0x90, 0xbd, 0x05, 0x31, 0xf7, 0xb0, 0x0b, + 0xe2, 0x3d, 0x2b, 0xd0, 0xf8, 0xff, 0x78, 0x05, 0x2a, 0xfd, 0x47, 0x4b, 0xe7, 0x7d, 0x62, 0xab, + 0x35, 0x03, 0x5b, 0x04, 0x6e, 0x80, 0x39, 0xf6, 0xc8, 0x40, 0xc4, 0xb3, 0x4c, 0x03, 0x53, 0xfe, + 0xc6, 0x15, 0x01, 0xa7, 0xc6, 0x2e, 0xb5, 0x14, 0x1f, 0xf5, 0xac, 0x80, 0xaf, 0x00, 0x28, 0x1a, + 0xef, 0x2e, 0x3d, 0xa2, 0x87, 0x50, 0x2d, 0x74, 0xad, 0x47, 0x02, 0xf5, 0x59, 0x05, 0xab, 0x60, + 0xde, 0xc2, 0xfb, 0xc4, 0xaa, 0x11, 0x8b, 0x18, 0x81, 0xeb, 0x73, 0x55, 0x62, 0x0a, 0xb0, 0xd0, + 0x69, 0x17, 0xe7, 0xaf, 0xa5, 0x99, 0xa8, 0x57, 0xbe, 0xb4, 0x92, 0x4e, 0xaf, 0xe4, 0xc6, 0xc5, + 0x73, 0xe6, 0x37, 0x19, 0xb0, 0x38, 0x38, 0x32, 0xe0, 0xf7, 0xe3, 0x57, 0x97, 0x68, 0xaa, 0xdf, + 0x1e, 0x55, 0x14, 0xca, 0x67, 0x17, 0xe8, 0x7d, 0x72, 0xc1, 0xef, 0xb0, 0x0e, 0x07, 0x5b, 0xd1, + 0x9c, 0xe7, 0xad, 0x91, 0x99, 0xc0, 0x40, 0x2a, 0x93, 0xa2, 0x79, 0xc2, 0x16, 0xef, 0x95, 0xb0, + 0x45, 0x4a, 0x7f, 0xd0, 0xd2, 0x0f, 0xef, 0x38, 0x83, 0xe1, 0x4f, 0x34, 0x30, 0xeb, 0x7a, 0xc4, + 0x59, 0xdf, 0xdb, 0x7a, 0xf5, 0x4b, 0x22, 0x93, 0xa5, 0xab, 0x76, 0x1e, 0xd0, 0xce, 0x57, 0x6a, + 0xbb, 0x3b, 0x42, 0xe1, 0x9e, 0xef, 0x7a, 0xb4, 0x72, 0xa1, 0xd3, 0x2e, 0xce, 0xee, 0x76, 0x43, + 0xa1, 0x34, 0x76, 0xc9, 0x06, 0x0b, 0x9b, 0xc7, 0x01, 0xf1, 0x1d, 0x6c, 0x6d, 0xb8, 0x46, 0x68, + 0x13, 0x27, 0x10, 0x86, 0xa6, 0x86, 0x44, 0xda, 0x19, 0x87, 0x44, 0x8f, 0x81, 0x6c, 0xe8, 0x5b, + 0x32, 0x8a, 0xa7, 0xd4, 0x10, 0x14, 0x5d, 0x43, 0x8c, 0x5e, 0x5a, 0x01, 0x63, 0xcc, 0x4e, 0x78, + 0x09, 0x64, 0x7d, 0x7c, 0xc4, 0xb5, 0x4e, 0x57, 0x26, 0x98, 0x08, 0xc2, 0x47, 0x88, 0xd1, 0x4a, + 0xff, 0x58, 0x02, 0xb3, 0xa9, 0xbd, 0xc0, 0x45, 0x90, 0x51, 0x93, 0x55, 0x20, 0x95, 0x66, 0xb6, + 0x36, 0x50, 0xc6, 0xac, 0xc3, 0xe7, 0x55, 0xf1, 0x15, 0xa0, 0x45, 0x55, 0xcf, 0x39, 0x95, 0xb5, + 0xb4, 0xb1, 0x3a, 0x66, 0x48, 0x54, 0x38, 0x99, 0x0d, 0xa4, 0x21, 0xb3, 0x44, 0xd8, 0x40, 0x1a, + 0x88, 0xd1, 0x3e, 0xeb, 0x84, 0x2c, 0x1a, 
0xd1, 0xe5, 0xce, 0x30, 0xa2, 0x1b, 0xbf, 0xe7, 0x88, + 0xee, 0x71, 0x90, 0x0b, 0xcc, 0xc0, 0x22, 0xfa, 0x44, 0xf7, 0xcb, 0xe3, 0x3a, 0x23, 0x22, 0xc1, + 0x83, 0x37, 0xc1, 0x44, 0x9d, 0x34, 0x70, 0x68, 0x05, 0x7a, 0x9e, 0x87, 0x50, 0x75, 0x08, 0x21, + 0x24, 0xe6, 0xa7, 0x1b, 0x42, 0x2f, 0x8a, 0x00, 0xe0, 0x13, 0x60, 0xc2, 0xc6, 0xc7, 0xa6, 0x1d, + 0xda, 0xbc, 0x27, 0xd3, 0x84, 0xd8, 0xb6, 0x20, 0xa1, 0x88, 0xc7, 0x2a, 0x23, 0x39, 0x36, 0xac, + 0x90, 0x9a, 0x2d, 0x22, 0x99, 0x3a, 0xe0, 0xb7, 0xa7, 0xaa, 0x8c, 0x9b, 0x29, 0x3e, 0xea, 0x59, + 0xc1, 0xc1, 0x4c, 0x87, 0x2f, 0x9e, 0x4a, 0x80, 0x09, 0x12, 0x8a, 0x78, 0xdd, 0x60, 0x52, 0x7e, + 0x7a, 0x10, 0x98, 0x5c, 0xdc, 0xb3, 0x02, 0x7e, 0x11, 0x4c, 0xda, 0xf8, 0xf8, 0x1a, 0x71, 0x9a, + 0xc1, 0x81, 0x3e, 0xb3, 0xac, 0xad, 0x66, 0x2b, 0x33, 0x9d, 0x76, 0x71, 0x72, 0x3b, 0x22, 0xa2, + 0x98, 0xcf, 0x85, 0x4d, 0x47, 0x0a, 0x9f, 0x4f, 0x08, 0x47, 0x44, 0x14, 0xf3, 0x59, 0x07, 0xe1, + 0xe1, 0x80, 0x25, 0x97, 0x3e, 0xdb, 0xfd, 0x32, 0xdc, 0x13, 0x64, 0x14, 0xf1, 0xe1, 0x2a, 0xc8, + 0xdb, 0xf8, 0x98, 0xbf, 0xe2, 0xf5, 0x39, 0xae, 0x96, 0xcf, 0x92, 0xb7, 0x25, 0x0d, 0x29, 0x2e, + 0x97, 0x34, 0x1d, 0x21, 0x39, 0x9f, 0x90, 0x94, 0x34, 0xa4, 0xb8, 0x2c, 0x88, 0x43, 0xc7, 0xbc, + 0x15, 0x12, 0x21, 0x0c, 0xb9, 0x67, 0x54, 0x10, 0xdf, 0x88, 0x59, 0x28, 0x29, 0xc7, 0x5e, 0xd1, + 0x76, 0x68, 0x05, 0xa6, 0x67, 0x91, 0xdd, 0x86, 0x7e, 0x81, 0xfb, 0x9f, 0xf7, 0xc9, 0xdb, 0x8a, + 0x8a, 0x12, 0x12, 0x90, 0x80, 0x31, 0xe2, 0x84, 0xb6, 0xfe, 0x08, 0xbf, 0xd8, 0x87, 0x12, 0x82, + 0x2a, 0x73, 0x36, 0x9d, 0xd0, 0x46, 0x5c, 0x3d, 0x7c, 0x1e, 0xcc, 0xd8, 0xf8, 0x98, 0x95, 0x03, + 0xe2, 0x07, 0xec, 0x7d, 0xbf, 0xc0, 0x37, 0x3f, 0xcf, 0x3a, 0xce, 0xed, 0x24, 0x03, 0x75, 0xcb, + 0xf1, 0x85, 0xa6, 0x93, 0x58, 0x78, 0x31, 0xb1, 0x30, 0xc9, 0x40, 0xdd, 0x72, 0xcc, 0xd3, 0x3e, + 0xb9, 0x15, 0x9a, 0x3e, 0xa9, 0xeb, 0x8f, 0xf2, 0x26, 0x55, 0xce, 0xf7, 0x05, 0x0d, 0x29, 0x2e, + 0x6c, 0x45, 0xe3, 0x1e, 0x9d, 0xa7, 0xe1, 0x8d, 0xe1, 0x56, 0xf2, 0x5d, 0x7f, 0xdd, 0xf7, 0xf1, + 0x89, 0xb8, 0x69, 0x92, 0x83, 0x1e, 0x48, 0x41, 0x0e, 0x5b, 0xd6, 0x6e, 0x43, 0xbf, 0xc4, 0x7d, + 0x3f, 0xec, 0x1b, 0x44, 0x55, 0x9d, 0x75, 0x06, 0x82, 0x04, 0x16, 0x03, 0x75, 0x1d, 0x16, 0x1a, + 0x8b, 0xa3, 0x05, 0xdd, 0x65, 0x20, 0x48, 0x60, 0xf1, 0x9d, 0x3a, 0x27, 0xbb, 0x0d, 0xfd, 0xff, + 0x46, 0xbc, 0x53, 0x06, 0x82, 0x04, 0x16, 0x34, 0x41, 0xd6, 0x71, 0x03, 0x7d, 0x69, 0x24, 0xd7, + 0x33, 0xbf, 0x70, 0x76, 0xdc, 0x00, 0x31, 0x0c, 0xf8, 0x73, 0x0d, 0x00, 0x2f, 0x0e, 0xd1, 0xc7, + 0x86, 0x32, 0x45, 0x48, 0x41, 0x96, 0xe3, 0xd8, 0xde, 0x74, 0x02, 0xff, 0x24, 0x7e, 0x47, 0x26, + 0x72, 0x20, 0x61, 0x05, 0xfc, 0xad, 0x06, 0x1e, 0x49, 0xb6, 0xc9, 0xca, 0xbc, 0x02, 0xf7, 0xc8, + 0xf5, 0x61, 0x87, 0x79, 0xc5, 0x75, 0xad, 0x8a, 0xde, 0x69, 0x17, 0x1f, 0x59, 0xef, 0x83, 0x8a, + 0xfa, 0xda, 0x02, 0xff, 0xa8, 0x81, 0x79, 0x59, 0x45, 0x13, 0x16, 0x16, 0xb9, 0x03, 0xc9, 0xb0, + 0x1d, 0x98, 0xc6, 0x11, 0x7e, 0x54, 0xbf, 0x4b, 0xf7, 0xf0, 0x51, 0xaf, 0x69, 0xf0, 0x2f, 0x1a, + 0x98, 0xae, 0x13, 0x8f, 0x38, 0x75, 0xe2, 0x18, 0xcc, 0xd6, 0xe5, 0xa1, 0x8c, 0x0d, 0xd2, 0xb6, + 0x6e, 0x24, 0x20, 0x84, 0x99, 0x65, 0x69, 0xe6, 0x74, 0x92, 0x75, 0xda, 0x2e, 0x5e, 0x8c, 0x97, + 0x26, 0x39, 0xa8, 0xcb, 0x4a, 0xf8, 0x91, 0x06, 0x66, 0xe3, 0x03, 0x10, 0x57, 0xca, 0xca, 0x08, + 0xe3, 0x80, 0xb7, 0xaf, 0xeb, 0xdd, 0x80, 0x28, 0x6d, 0x01, 0xfc, 0x93, 0xc6, 0x3a, 0xb5, 0xe8, + 0xdd, 0x47, 0xf5, 0x12, 0xf7, 0xe5, 0x3b, 0x43, 0xf7, 0xa5, 0x42, 0x10, 0xae, 0xbc, 0x1c, 0xb7, + 0x82, 0x8a, 0x73, 0xda, 0x2e, 0x2e, 0x24, 0x3d, 0xa9, 0x18, 0x28, 
0x69, 0x21, 0xfc, 0x91, 0x06, + 0xa6, 0x49, 0xdc, 0x71, 0x53, 0xfd, 0xf1, 0xa1, 0x38, 0xb1, 0x6f, 0x13, 0x2f, 0x5e, 0xea, 0x09, + 0x16, 0x45, 0x5d, 0xd8, 0xac, 0x83, 0x24, 0xc7, 0xd8, 0xf6, 0x2c, 0xa2, 0xff, 0xff, 0x90, 0x3b, + 0xc8, 0x4d, 0xa1, 0x17, 0x45, 0x00, 0xf0, 0x32, 0xc8, 0x3b, 0xa1, 0x65, 0xe1, 0x7d, 0x8b, 0xe8, + 0x4f, 0xf0, 0x5e, 0x44, 0x4d, 0x31, 0x77, 0x24, 0x1d, 0x29, 0x89, 0x45, 0xf6, 0x4e, 0x4a, 0xe5, + 0x19, 0x9c, 0x03, 0xd9, 0x43, 0x22, 0x7f, 0x0e, 0x46, 0xec, 0x4f, 0x58, 0x07, 0xb9, 0x16, 0xb6, + 0xc2, 0xe8, 0xa9, 0x37, 0xe4, 0x1a, 0x8d, 0x84, 0xf2, 0x17, 0x33, 0x2f, 0x68, 0x8b, 0xb7, 0x35, + 0x70, 0xb1, 0x7f, 0xfa, 0x3f, 0x54, 0xb3, 0x7e, 0xa5, 0x81, 0xf9, 0x9e, 0x4c, 0xef, 0x63, 0xd1, + 0xad, 0x6e, 0x8b, 0xde, 0x18, 0x76, 0xca, 0xd6, 0x02, 0xdf, 0x74, 0x9a, 0xbc, 0x4f, 0x49, 0x9a, + 0xf7, 0x53, 0x0d, 0xcc, 0xa5, 0x93, 0xe7, 0x61, 0xfa, 0xab, 0x74, 0x3b, 0x03, 0x2e, 0xf6, 0x6f, + 0xaf, 0xa0, 0xaf, 0xde, 0x91, 0xa3, 0x79, 0x8f, 0xf7, 0x9b, 0xdd, 0x7d, 0xa0, 0x81, 0xa9, 0x9b, + 0x4a, 0x2e, 0xfa, 0xb9, 0x70, 0xe8, 0x93, 0x80, 0xa8, 0x5a, 0xc5, 0x0c, 0x8a, 0x92, 0xb8, 0xa5, + 0x3f, 0x6b, 0x60, 0xa1, 0x6f, 0x19, 0x66, 0x0f, 0x56, 0x6c, 0x59, 0xee, 0x91, 0x18, 0xe8, 0x24, + 0xa6, 0xa5, 0xeb, 0x9c, 0x8a, 0x24, 0x37, 0xe1, 0xbd, 0xcc, 0xe7, 0xe5, 0xbd, 0xd2, 0xdf, 0x34, + 0xb0, 0x74, 0xaf, 0x48, 0x7c, 0x28, 0x47, 0xba, 0x0a, 0xf2, 0xb2, 0x85, 0x3a, 0xe1, 0xc7, 0x29, + 0x5f, 0x0d, 0xb2, 0x68, 0xf0, 0xff, 0x90, 0x11, 0x7f, 0x95, 0xde, 0xd7, 0xc0, 0x5c, 0x8d, 0xf8, + 0x2d, 0xd3, 0x20, 0x88, 0x34, 0x88, 0x4f, 0x1c, 0x83, 0xc0, 0x35, 0x30, 0xc9, 0x7f, 0xa7, 0xf3, + 0xb0, 0x11, 0x0d, 0xb1, 0xe7, 0xa5, 0xcb, 0x27, 0x77, 0x22, 0x06, 0x8a, 0x65, 0xd4, 0xc0, 0x3b, + 0x33, 0x70, 0xe0, 0xbd, 0x04, 0xc6, 0xbc, 0x78, 0x1c, 0x98, 0x67, 0x5c, 0x3e, 0x01, 0xe4, 0xd4, + 0xd2, 0xdf, 0x35, 0xd0, 0xef, 0xbf, 0x55, 0x60, 0x0b, 0x4c, 0x50, 0x61, 0x9c, 0x74, 0xde, 0xee, + 0x03, 0x3a, 0x2f, 0xbd, 0x55, 0x71, 0x4d, 0x44, 0xd4, 0x08, 0x8c, 0xf9, 0xcf, 0xc0, 0x95, 0xd0, + 0xa9, 0xcb, 0x01, 0xde, 0xb4, 0xf0, 0x5f, 0x75, 0x5d, 0xd0, 0x90, 0xe2, 0xc2, 0x4b, 0x62, 0xd4, + 0x94, 0x98, 0xdf, 0x44, 0x63, 0xa6, 0xca, 0x95, 0x3b, 0x77, 0x0b, 0xe7, 0x3e, 0xbe, 0x5b, 0x38, + 0xf7, 0xc9, 0xdd, 0xc2, 0xb9, 0xef, 0x76, 0x0a, 0xda, 0x9d, 0x4e, 0x41, 0xfb, 0xb8, 0x53, 0xd0, + 0x3e, 0xe9, 0x14, 0xb4, 0x7f, 0x75, 0x0a, 0xda, 0xcf, 0x3e, 0x2d, 0x9c, 0xfb, 0xe6, 0x84, 0x34, + 0xed, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x16, 0xda, 0x75, 0x14, 0x43, 0x2a, 0x00, 0x00, } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 8ee83cf567..7e18cb9e31 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -113,6 +113,16 @@ message CustomResourceConversion { // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. // +optional optional WebhookClientConfig webhookClientConfig = 2; + + // ConversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, conversion will fail for this object. 
+ // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + // Default to `['v1beta1']`. + // +optional + repeated string conversionReviewVersions = 3; } // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format @@ -431,6 +441,8 @@ message JSONSchemaProps { optional ExternalDocumentation externalDocs = 35; optional JSON example = 36; + + optional bool nullable = 37; } // JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index e99d9e49b5..973ac0eedd 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -91,6 +91,16 @@ type CustomResourceConversion struct { // alpha-level and is only honored by servers that enable the CustomResourceWebhookConversion feature. // +optional WebhookClientConfig *WebhookClientConfig `json:"webhookClientConfig,omitempty" protobuf:"bytes,2,name=webhookClientConfig"` + + // ConversionReviewVersions is an ordered list of preferred `ConversionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, conversion will fail for this object. + // If a persisted Webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail. + // Default to `['v1beta1']`. + // +optional + ConversionReviewVersions []string `json:"conversionReviewVersions,omitempty" protobuf:"bytes,3,rep,name=conversionReviewVersions"` } // WebhookClientConfig contains the information to make a TLS diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go index 9776731cf9..54c0a4ae13 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go @@ -54,6 +54,7 @@ type JSONSchemaProps struct { Definitions JSONSchemaDefinitions `json:"definitions,omitempty" protobuf:"bytes,34,opt,name=definitions"` ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty" protobuf:"bytes,35,opt,name=externalDocs"` Example *JSON `json:"example,omitempty" protobuf:"bytes,36,opt,name=example"` + Nullable bool `json:"nullable,omitempty" protobuf:"bytes,37,opt,name=nullable"` } // JSON represents any valid JSON value. 
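The two fields added above, `conversionReviewVersions` on `CustomResourceConversion` and `nullable` on `JSONSchemaProps`, are new surface in the vendored apiextensions v1beta1 API. A minimal sketch of how they might be set from Go follows; the helper name, webhook URL, and schema fragment are illustrative assumptions, not something this patch itself configures:

    package example

    import (
    	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    )

    // webhookConversion builds a conversion stanza that prefers the v1beta1
    // ConversionReview payload; the API server picks the first listed version
    // it supports, and conversion fails if it supports none of them.
    func webhookConversion(url string) *apiextensionsv1beta1.CustomResourceConversion {
    	return &apiextensionsv1beta1.CustomResourceConversion{
    		Strategy:                 apiextensionsv1beta1.WebhookConverter,
    		WebhookClientConfig:      &apiextensionsv1beta1.WebhookClientConfig{URL: &url},
    		ConversionReviewVersions: []string{"v1beta1"},
    	}
    }

    // nullableString is a schema fragment that accepts an explicit null value
    // in addition to a string, using the new Nullable flag.
    var nullableString = apiextensionsv1beta1.JSONSchemaProps{
    	Type:     "string",
    	Nullable: true,
    }
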
@@ -68,7 +69,7 @@ type JSON struct { // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators func (_ JSON) OpenAPISchemaType() []string { // TODO: return actual types when anyOf is supported - return []string{} + return nil } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing @@ -91,7 +92,7 @@ type JSONSchemaPropsOrArray struct { // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators func (_ JSONSchemaPropsOrArray) OpenAPISchemaType() []string { // TODO: return actual types when anyOf is supported - return []string{} + return nil } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing @@ -111,7 +112,7 @@ type JSONSchemaPropsOrBool struct { // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators func (_ JSONSchemaPropsOrBool) OpenAPISchemaType() []string { // TODO: return actual types when anyOf is supported - return []string{} + return nil } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing @@ -133,7 +134,7 @@ type JSONSchemaPropsOrStringArray struct { // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators func (_ JSONSchemaPropsOrStringArray) OpenAPISchemaType() []string { // TODO: return actual types when anyOf is supported - return []string{} + return nil } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index cab8019b45..e7ae745b62 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -296,6 +296,7 @@ func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResou func autoConvert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceConversion(in *CustomResourceConversion, out *apiextensions.CustomResourceConversion, s conversion.Scope) error { out.Strategy = apiextensions.ConversionStrategyType(in.Strategy) out.WebhookClientConfig = (*apiextensions.WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) return nil } @@ -307,6 +308,7 @@ func Convert_v1beta1_CustomResourceConversion_To_apiextensions_CustomResourceCon func autoConvert_apiextensions_CustomResourceConversion_To_v1beta1_CustomResourceConversion(in *apiextensions.CustomResourceConversion, out *CustomResourceConversion, s conversion.Scope) error { out.Strategy = ConversionStrategyType(in.Strategy) out.WebhookClientConfig = (*WebhookClientConfig)(unsafe.Pointer(in.WebhookClientConfig)) + out.ConversionReviewVersions = *(*[]string)(unsafe.Pointer(&in.ConversionReviewVersions)) return nil } @@ -902,6 +904,7 @@ func autoConvert_v1beta1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(in *JS } else { out.Example = nil } + out.Nullable = in.Nullable return nil } @@ -916,6 +919,7 @@ func autoConvert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *ap out.Ref = (*string)(unsafe.Pointer(in.Ref)) out.Description = in.Description out.Type = in.Type + out.Nullable = in.Nullable out.Format = in.Format out.Title = in.Title if in.Default != nil { diff --git 
a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 8dd7a87bfc..20ded01bf3 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -130,6 +130,11 @@ func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) *out = new(WebhookClientConfig) (*in).DeepCopyInto(*out) } + if in.ConversionReviewVersions != nil { + in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index 5a01307539..667e556ac9 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -48,6 +48,11 @@ func (in *CustomResourceConversion) DeepCopyInto(out *CustomResourceConversion) *out = new(WebhookClientConfig) (*in).DeepCopyInto(*out) } + if in.ConversionReviewVersions != nil { + in, out := &in.ConversionReviewVersions, &out.ConversionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/clientset.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go similarity index 71% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/clientset.go rename to vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go index 4c43c131a1..aa2dbcbb08 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/clientset.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/clientset.go @@ -19,35 +19,27 @@ limitations under the License. package clientset import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" ) type Interface interface { Discovery() discovery.DiscoveryInterface - ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Cluster() clusterv1alpha1.ClusterV1alpha1Interface + ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - clusterV1alpha1 *clusterv1alpha1.ClusterV1alpha1Client + apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client } -// ClusterV1alpha1 retrieves the ClusterV1alpha1Client -func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface { - return c.clusterV1alpha1 -} - -// Deprecated: Cluster retrieves the default version of ClusterClient. -// Please explicitly pick a version. 
-func (c *Clientset) Cluster() clusterv1alpha1.ClusterV1alpha1Interface { - return c.clusterV1alpha1 +// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client +func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface { + return c.apiextensionsV1beta1 } // Discovery retrieves the DiscoveryClient @@ -66,7 +58,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.clusterV1alpha1, err = clusterv1alpha1.NewForConfig(&configShallowCopy) + cs.apiextensionsV1beta1, err = apiextensionsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -82,7 +74,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.clusterV1alpha1 = clusterv1alpha1.NewForConfigOrDie(c) + cs.apiextensionsV1beta1 = apiextensionsv1beta1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -91,7 +83,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset - cs.clusterV1alpha1 = clusterv1alpha1.New(c) + cs.apiextensionsV1beta1 = apiextensionsv1beta1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/doc.go rename to vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/doc.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/doc.go rename to vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go similarity index 93% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go rename to vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go index f9b985262c..4232ce5ce1 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go @@ -19,19 +19,19 @@ limitations under the License. 
package scheme import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - clusterv1alpha1.AddToScheme, + apiextensionsv1beta1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go new file mode 100644 index 0000000000..a1fd337f98 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/apiextensions_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type ApiextensionsV1beta1Interface interface { + RESTClient() rest.Interface + CustomResourceDefinitionsGetter +} + +// ApiextensionsV1beta1Client is used to interact with features provided by the apiextensions.k8s.io group. +type ApiextensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ApiextensionsV1beta1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface { + return newCustomResourceDefinitions(c) +} + +// NewForConfig creates a new ApiextensionsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*ApiextensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ApiextensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiextensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiextensionsV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *ApiextensionsV1beta1Client { + return &ApiextensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiextensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go new file mode 100644 index 0000000000..c925313a7c --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/customresourcedefinition.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + scheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface. +// A group's client should implement this interface. +type CustomResourceDefinitionsGetter interface { + CustomResourceDefinitions() CustomResourceDefinitionInterface +} + +// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources. 
+type CustomResourceDefinitionInterface interface { + Create(*v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) + Update(*v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) + UpdateStatus(*v1beta1.CustomResourceDefinition) (*v1beta1.CustomResourceDefinition, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CustomResourceDefinition, error) + List(opts v1.ListOptions) (*v1beta1.CustomResourceDefinitionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) + CustomResourceDefinitionExpansion +} + +// customResourceDefinitions implements CustomResourceDefinitionInterface +type customResourceDefinitions struct { + client rest.Interface +} + +// newCustomResourceDefinitions returns a CustomResourceDefinitions +func newCustomResourceDefinitions(c *ApiextensionsV1beta1Client) *customResourceDefinitions { + return &customResourceDefinitions{ + client: c.RESTClient(), + } +} + +// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any. +func (c *customResourceDefinitions) Get(name string, options v1.GetOptions) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors. +func (c *customResourceDefinitions) List(opts v1.ListOptions) (result *v1beta1.CustomResourceDefinitionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CustomResourceDefinitionList{} + err = c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customResourceDefinitions. +func (c *customResourceDefinitions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("customresourcedefinitions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. +func (c *customResourceDefinitions) Create(customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Post(). + Resource("customresourcedefinitions"). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any. 
+func (c *customResourceDefinitions) Update(customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *customResourceDefinitions) UpdateStatus(customResourceDefinition *v1beta1.CustomResourceDefinition) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Put(). + Resource("customresourcedefinitions"). + Name(customResourceDefinition.Name). + SubResource("status"). + Body(customResourceDefinition). + Do(). + Into(result) + return +} + +// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs. +func (c *customResourceDefinitions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("customresourcedefinitions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customResourceDefinitions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("customresourcedefinitions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched customResourceDefinition. +func (c *customResourceDefinitions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CustomResourceDefinition, err error) { + result = &v1beta1.CustomResourceDefinition{} + err = c.client.Patch(pt). + Resource("customresourcedefinitions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go similarity index 97% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go rename to vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go index df51baa4d4..771101956f 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. 
-package v1alpha1 +package v1beta1 diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go similarity index 89% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go rename to vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go index 16f4439906..2a989d4bea 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/doc.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/generated_expansion.go @@ -16,5 +16,6 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -// Package fake has the automatically generated clients. -package fake +package v1beta1 + +type CustomResourceDefinitionExpansion interface{} diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 0000000000..f02fa8e434 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. 
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index dc6a4c7242..63434030ca 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - lavalamp diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 48c1104d99..afd97f7adc 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -184,6 +184,20 @@ func NewConflict(qualifiedResource schema.GroupResource, name string, err error) }} } +// NewApplyConflict returns an error including details on the requests apply conflicts +func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError { + return &StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + // TODO: Get obj details here? + Causes: causes, + }, + Message: message, + }} +} + // NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. func NewGone(message string) *StatusError { return &StatusError{metav1.Status{ diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index 5f729ffe63..dd2c0cb614 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - smarterclayton diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index c70b3d2b6c..3425055f6e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -158,6 +158,19 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { // objectSliceType is the type of a slice of Objects var objectSliceType = reflect.TypeOf([]runtime.Object{}) +// LenList returns the length of this list or 0 if it is not a list. +func LenList(list runtime.Object) int { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return 0 + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return 0 + } + return items.Len() +} + // SetList sets the given list object's Items member have the elements given in // objects. 
// Returns an error if list is not a List type (does not have an Items member), diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 6fe7458f6c..b50337e13f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,14 +20,13 @@ import ( "fmt" "reflect" - "k8s.io/klog" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) // errNotList is returned when an object implements the Object style interfaces but not the List style @@ -138,6 +137,7 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata Finalizers: m.GetFinalizers(), ClusterName: m.GetClusterName(), Initializers: m.GetInitializers(), + ManagedFields: m.GetManagedFields(), }, } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index c430067f35..8454be55ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - lavalamp diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index b155a62a45..54fda58064 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -680,7 +680,7 @@ func NewScaledQuantity(value int64, scale Scale) *Quantity { } } -// Value returns the value of q; any fractional part will be lost. +// Value returns the unscaled value of q rounded up to the nearest integer away from 0. func (q *Quantity) Value() int64 { return q.ScaledValue(0) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS deleted file mode 100644 index 2f7b10df4b..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/config/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: -- api-approvers -- sttts -- luxas -reviewers: -- api-reviewers -- hanxiaoshuai diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/types.go b/vendor/k8s.io/apimachinery/pkg/apis/config/types.go deleted file mode 100644 index b32fc8a281..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/config/types.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -// ClientConnectionConfiguration contains details for constructing a client. -type ClientConnectionConfiguration struct { - // kubeconfig is the path to a KubeConfig file. - Kubeconfig string - // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the - // default value of 'application/json'. 
This field will control all connections to the server used by a particular - // client. - AcceptContentTypes string - // contentType is the content type used when sending data to the server from this client. - ContentType string - // qps controls the number of queries per second allowed for this connection. - QPS float32 - // burst allows extra queries to accumulate when a client is exceeding its rate. - Burst int32 -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go index 673e56212a..bdb71340ab 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go @@ -28,7 +28,6 @@ func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out if err := metav1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err } - out.IncludeUninitialized = in.IncludeUninitialized out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = in.TimeoutSeconds out.Watch = in.Watch @@ -44,7 +43,6 @@ func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOption if err := metav1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { return err } - out.IncludeUninitialized = in.IncludeUninitialized out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = in.TimeoutSeconds out.Watch = in.Watch diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go index 1e85c5c43d..2741ee2c80 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 -package internalversion +package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index 2f6740d368..e434e5055a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -33,9 +33,6 @@ type ListOptions struct { LabelSelector labels.Selector // A selector based on fields FieldSelector fields.Selector - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized bool // If true, watch for changes to this list Watch bool // When specified with a watch call, shows changes that occur after that particular version of a resource. 
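Of the vendored additions a few hunks above, `equality.Semantic` is the one controller code most commonly reaches for: it compares API objects by meaning rather than representation (for example, `resource.Quantity` values that differ only in formatting compare equal). A small illustrative sketch; the helper name and the use of `Service` are assumptions, not something this patch adds:

    package example

    import (
    	corev1 "k8s.io/api/core/v1"
    	apiequality "k8s.io/apimachinery/pkg/api/equality"
    )

    // specChanged reports whether the desired Service spec differs semantically
    // from the one currently on the cluster, ignoring formatting-only differences.
    func specChanged(current, desired *corev1.Service) bool {
    	return !apiequality.Semantic.DeepEqual(current.Spec, desired.Spec)
    }
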
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 18d190b244..79b7567360 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -117,7 +117,6 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } - out.IncludeUninitialized = in.IncludeUninitialized out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) @@ -133,7 +132,6 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { return err } - out.IncludeUninitialized = in.IncludeUninitialized out.Watch = in.Watch out.ResourceVersion = in.ResourceVersion out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index cdb125a0dd..44929b1c00 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - smarterclayton diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index 2eaabf0794..babe8a8b53 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -48,3 +48,13 @@ func (d *Duration) UnmarshalJSON(b []byte) error { func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(d.Duration.String()) } + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 81320c9c88..69e6509932 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -33,6 +33,7 @@ limitations under the License. DeleteOptions Duration ExportOptions + Fields GetOptions GroupKind GroupResource @@ -47,10 +48,12 @@ limitations under the License. 
List ListMeta ListOptions + ManagedFieldsEntry MicroTime ObjectMeta OwnerReference Patch + PatchOptions Preconditions RootPaths ServerAddressByClientCIDR @@ -130,131 +133,143 @@ func (m *ExportOptions) Reset() { *m = ExportOptions{} } func (*ExportOptions) ProtoMessage() {} func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *Fields) Reset() { *m = Fields{} } +func (*Fields) ProtoMessage() {} +func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} + return fileDescriptorGenerated, []int{14} } func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *Initializer) Reset() { *m = Initializer{} } func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *Initializers) Reset() { *m = Initializers{} } func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func 
(*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{29} + return fileDescriptorGenerated, []int{32} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*Status) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -266,6 +281,7 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*Fields)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Fields") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -280,10 +296,12 @@ func init() { proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") 
proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") + proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") @@ -464,6 +482,10 @@ func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i += copy(dAtA[i:], m.StorageVersionHash) return i, nil } @@ -576,14 +598,10 @@ func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } - dAtA[i] = 0x10 - i++ - if m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x1a i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i += copy(dAtA[i:], m.FieldManager) return i, nil } @@ -706,6 +724,55 @@ func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Fields) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Fields) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Map) > 0 { + keysForMap := make([]string, 0, len(m.Map)) + for k := range m.Map { + keysForMap = append(keysForMap, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMap) + for _, k := range keysForMap { + dAtA[i] = 0xa + i++ + v := m.Map[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n4, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + } + return i, nil +} + func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -725,14 +792,6 @@ func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x10 - i++ - if m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ return i, nil } @@ -953,11 +1012,11 @@ func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n4, err := m.Result.MarshalTo(dAtA[i:]) + n5, err := m.Result.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 } return i, nil } @@ -1073,11 +1132,11 @@ func (m *List) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 
+ i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1163,14 +1222,6 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } - dAtA[i] = 0x30 - i++ - if m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ dAtA[i] = 0x38 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) @@ -1181,6 +1232,56 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i += copy(dAtA[i:], m.Manager) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i += copy(dAtA[i:], m.Operation) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + if m.Time != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n7, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Fields != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size())) + n8, err := m.Fields.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1226,20 +1327,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n9 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n10 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -1327,11 +1428,25 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n8, err := m.Initializers.MarshalTo(dAtA[i:]) + n11, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n11 + } + if len(m.ManagedFields) > 0 { + for _, msg := range m.ManagedFields { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -1408,6 +1523,53 @@ func (m *Patch) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *PatchOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = 
uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Force != nil { + dAtA[i] = 0x10 + i++ + if *m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i += copy(dAtA[i:], m.FieldManager) + return i, nil +} + func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1429,6 +1591,12 @@ func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) i += copy(dAtA[i:], *m.UID) } + if m.ResourceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) + i += copy(dAtA[i:], *m.ResourceVersion) + } return i, nil } @@ -1509,11 +1677,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n12, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n12 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) @@ -1530,11 +1698,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n10, err := m.Details.MarshalTo(dAtA[i:]) + n13, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n13 } dAtA[i] = 0x30 i++ @@ -1701,6 +1869,10 @@ func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i += copy(dAtA[i:], m.FieldManager) return i, nil } @@ -1759,11 +1931,11 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n11, err := m.Object.MarshalTo(dAtA[i:]) + n14, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n14 return i, nil } @@ -1840,6 +2012,8 @@ func (m *APIResource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Version) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageVersionHash) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1884,7 +2058,8 @@ func (m *CreateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - n += 2 + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1929,12 +2104,26 @@ func (m *ExportOptions) Size() (n int) { return n } +func (m *Fields) Size() (n int) { + var l int + _ = l + if len(m.Map) > 0 { + for k, v := range m.Map { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *GetOptions) Size() (n int) { var l int _ = l l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) - n += 2 return n } @@ -2101,13 +2290,32 @@ func (m *ListOptions) Size() (n int) { if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } - n += 2 n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) return n } +func (m *ManagedFieldsEntry) Size() (n int) { + var l int + _ = l + l = len(m.Manager) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Time != nil { + l = m.Time.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + } + if m.Fields != nil { + l = m.Fields.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ObjectMeta) Size() (n int) { var l int _ = l @@ -2167,6 +2375,12 @@ func (m *ObjectMeta) Size() (n int) { l = m.Initializers.Size() n += 2 + l + sovGenerated(uint64(l)) } + if len(m.ManagedFields) > 0 { + for _, e := range m.ManagedFields { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -2196,6 +2410,23 @@ func (m *Patch) Size() (n int) { return n } +func (m *PatchOptions) Size() (n int) { + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Force != nil { + n += 2 + } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -2203,6 +2434,10 @@ func (m *Preconditions) Size() (n int) { l = len(*m.UID) n += 1 + l + sovGenerated(uint64(l)) } + if m.ResourceVersion != nil { + l = len(*m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -2307,6 +2542,8 @@ func (m *UpdateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2382,6 +2619,7 @@ func (this *APIResource) String() string { `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`, `}`, }, "") return s @@ -2403,7 +2641,7 @@ func (this *CreateOptions) String() string { } s := strings.Join([]string{`&CreateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, `}`, }, "") return s @@ -2443,13 +2681,32 @@ func (this *ExportOptions) String() string { }, "") return s } +func (this *Fields) String() string { + if this == nil { + return "nil" + } + keysForMap := make([]string, 0, len(this.Map)) + for k := range this.Map { + keysForMap = append(keysForMap, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMap) + mapStringForMap := "map[string]Fields{" + for _, k := range keysForMap { + mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) + } + mapStringForMap += "}" + s := strings.Join([]string{`&Fields{`, + `Map:` + mapStringForMap + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&GetOptions{`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `}`, }, "") return s @@ -2552,13 +2809,26 @@ func (this *ListOptions) String() string { `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `}`, }, "") return s } +func (this *ManagedFieldsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagedFieldsEntry{`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, 
+ `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, + `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`, + `}`, + }, "") + return s +} func (this *ObjectMeta) String() string { if this == nil { return "nil" @@ -2600,6 +2870,7 @@ func (this *ObjectMeta) String() string { `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, + `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2628,12 +2899,25 @@ func (this *Patch) String() string { }, "") return s } +func (this *PatchOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PatchOptions{`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `Force:` + valueToStringGenerated(this.Force) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Preconditions{`, `UID:` + valueToStringGenerated(this.UID) + `,`, + `ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`, `}`, }, "") return s @@ -2729,6 +3013,7 @@ func (this *UpdateOptions) String() string { } s := strings.Join([]string{`&UpdateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, `}`, }, "") return s @@ -3289,13 +3574,42 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType) } - if skippy < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersionHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -3588,11 +3902,11 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3602,12 +3916,21 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { break } } - m.IncludeUninitialized = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3971,7 +4294,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetOptions) Unmarshal(dAtA []byte) error { +func (m *Fields) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3994,17 +4317,17 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + return fmt.Errorf("proto: Fields: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4014,26 +4337,170 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + if m.Map == nil { + m.Map = make(map[string]Fields) + } + var mapkey string + mapvalue := &Fields{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Fields{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Map[mapkey] = *mapvalue iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var v int + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4043,12 +4510,21 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.IncludeUninitialized = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5709,26 +6185,6 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } } m.TimeoutSeconds = &v - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludeUninitialized = bool(v != 0) case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) @@ -5798,7 +6254,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ObjectMeta) Unmarshal(dAtA []byte) error { +func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5821,15 +6277,15 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5854,11 +6310,11 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Manager = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5883,11 +6339,11 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.GenerateName = string(dAtA[iNdEx:postIndex]) + m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5912,13 +6368,13 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5928,26 +6384,30 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.SelfLink = string(dAtA[iNdEx:postIndex]) + if m.Time == nil { + m.Time = &Time{} + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5957,26 +6417,225 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + if m.Fields == nil { + 
m.Fields = &Fields{} } - var stringLen uint64 + if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6461,6 +7120,37 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) + if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6740,6 +7430,135 @@ func (m *Patch) Unmarshal(dAtA []byte) error { } return nil } +func (m *PatchOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PatchOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PatchOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Force = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Preconditions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -6799,6 +7618,36 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) m.UID = &s iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7833,6 +8682,35 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8152,160 +9030,173 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, - 0xf5, 0x4f, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa8, 0xcd, 0xfe, 0xff, 0xde, 0x08, 0xec, 0x6c, - 0x2f, 0x5a, 0x65, 0x61, 0xd6, 0x26, 0x59, 0x58, 0x0d, 0x03, 0x2c, 0xc4, 0x71, 0x66, 0x14, 0xed, - 0x64, 0xc6, 0xaa, 0xec, 0x0c, 0x62, 0x18, 0x21, 0x3a, 0xdd, 0x15, 0xa7, 0x49, 0xbb, 0xdb, 0x5b, - 
0xd5, 0xce, 0x8c, 0xe1, 0xc0, 0x1e, 0x40, 0x70, 0x40, 0x68, 0x8e, 0x9c, 0xd0, 0x8e, 0xe0, 0xc2, - 0x95, 0x13, 0x17, 0x38, 0x21, 0x31, 0xc7, 0x91, 0xb8, 0xec, 0x01, 0x59, 0x3b, 0xe6, 0xc0, 0x09, - 0x71, 0xcf, 0x09, 0x55, 0x75, 0x75, 0x75, 0xb7, 0x1d, 0x4f, 0xda, 0x3b, 0xbb, 0x88, 0x53, 0xd2, - 0xef, 0xe3, 0xf7, 0x5e, 0x55, 0xbd, 0x7a, 0xef, 0xd5, 0x33, 0x1c, 0x9c, 0x5e, 0x65, 0x75, 0xdb, - 0x6b, 0x9c, 0xf6, 0x8f, 0x08, 0x75, 0x89, 0x4f, 0x58, 0xe3, 0x8c, 0xb8, 0x96, 0x47, 0x1b, 0x92, - 0x61, 0xf4, 0xec, 0xae, 0x61, 0x9e, 0xd8, 0x2e, 0xa1, 0x83, 0x46, 0xef, 0xb4, 0xc3, 0x09, 0xac, - 0xd1, 0x25, 0xbe, 0xd1, 0x38, 0xdb, 0x6a, 0x74, 0x88, 0x4b, 0xa8, 0xe1, 0x13, 0xab, 0xde, 0xa3, - 0x9e, 0xef, 0xa1, 0x2f, 0x04, 0x5a, 0xf5, 0xb8, 0x56, 0xbd, 0x77, 0xda, 0xe1, 0x04, 0x56, 0xe7, - 0x5a, 0xf5, 0xb3, 0xad, 0xf5, 0x37, 0x3b, 0xb6, 0x7f, 0xd2, 0x3f, 0xaa, 0x9b, 0x5e, 0xb7, 0xd1, - 0xf1, 0x3a, 0x5e, 0x43, 0x28, 0x1f, 0xf5, 0x8f, 0xc5, 0x97, 0xf8, 0x10, 0xff, 0x05, 0xa0, 0xeb, - 0x53, 0x5d, 0xa1, 0x7d, 0xd7, 0xb7, 0xbb, 0x64, 0xdc, 0x8b, 0xf5, 0xb7, 0x2f, 0x53, 0x60, 0xe6, - 0x09, 0xe9, 0x1a, 0xe3, 0x7a, 0xfa, 0x5f, 0xb3, 0x50, 0xd8, 0x69, 0xef, 0xdf, 0xa0, 0x5e, 0xbf, - 0x87, 0x36, 0x60, 0xde, 0x35, 0xba, 0xa4, 0xa2, 0x6d, 0x68, 0x9b, 0xc5, 0xe6, 0xe2, 0x93, 0x61, - 0x6d, 0x6e, 0x34, 0xac, 0xcd, 0xdf, 0x32, 0xba, 0x04, 0x0b, 0x0e, 0x72, 0xa0, 0x70, 0x46, 0x28, - 0xb3, 0x3d, 0x97, 0x55, 0x32, 0x1b, 0xd9, 0xcd, 0xd2, 0xf6, 0x3b, 0xf5, 0x34, 0xeb, 0xaf, 0x0b, - 0x03, 0x77, 0x03, 0xd5, 0xeb, 0x1e, 0x6d, 0xd9, 0xcc, 0xf4, 0xce, 0x08, 0x1d, 0x34, 0x57, 0xa4, - 0x95, 0x82, 0x64, 0x32, 0xac, 0x2c, 0xa0, 0x9f, 0x6a, 0xb0, 0xd2, 0xa3, 0xe4, 0x98, 0x50, 0x4a, - 0x2c, 0xc9, 0xaf, 0x64, 0x37, 0xb4, 0x4f, 0xc1, 0x6c, 0x45, 0x9a, 0x5d, 0x69, 0x8f, 0xe1, 0xe3, - 0x09, 0x8b, 0xe8, 0xb7, 0x1a, 0xac, 0x33, 0x42, 0xcf, 0x08, 0xdd, 0xb1, 0x2c, 0x4a, 0x18, 0x6b, - 0x0e, 0x76, 0x1d, 0x9b, 0xb8, 0xfe, 0xee, 0x7e, 0x0b, 0xb3, 0xca, 0xbc, 0xd8, 0x87, 0x6f, 0xa5, - 0x73, 0xe8, 0x70, 0x1a, 0x4e, 0x53, 0x97, 0x1e, 0xad, 0x4f, 0x15, 0x61, 0xf8, 0x39, 0x6e, 0xe8, - 0xc7, 0xb0, 0x18, 0x1e, 0xe4, 0x4d, 0x9b, 0xf9, 0xe8, 0x2e, 0xe4, 0x3b, 0xfc, 0x83, 0x55, 0x34, - 0xe1, 0x60, 0x3d, 0x9d, 0x83, 0x21, 0x46, 0x73, 0x49, 0xfa, 0x93, 0x17, 0x9f, 0x0c, 0x4b, 0x34, - 0xfd, 0x4f, 0x59, 0x28, 0xed, 0xb4, 0xf7, 0x31, 0x61, 0x5e, 0x9f, 0x9a, 0x24, 0x45, 0xd0, 0x6c, - 0x03, 0xf0, 0xbf, 0xac, 0x67, 0x98, 0xc4, 0xaa, 0x64, 0x36, 0xb4, 0xcd, 0x42, 0x13, 0x49, 0x39, - 0xb8, 0xa5, 0x38, 0x38, 0x26, 0xc5, 0x51, 0x4f, 0x6d, 0xd7, 0x12, 0xa7, 0x1d, 0x43, 0x7d, 0xd7, - 0x76, 0x2d, 0x2c, 0x38, 0xe8, 0x26, 0xe4, 0xce, 0x08, 0x3d, 0xe2, 0xfb, 0xcf, 0x03, 0xe2, 0x4b, - 0xe9, 0x96, 0x77, 0x97, 0xab, 0x34, 0x8b, 0xa3, 0x61, 0x2d, 0x27, 0xfe, 0xc5, 0x01, 0x08, 0xaa, - 0x03, 0xb0, 0x13, 0x8f, 0xfa, 0xc2, 0x9d, 0x4a, 0x6e, 0x23, 0xbb, 0x59, 0x6c, 0x2e, 0x71, 0xff, - 0x0e, 0x15, 0x15, 0xc7, 0x24, 0xd0, 0x55, 0x58, 0x64, 0xb6, 0xdb, 0xe9, 0x3b, 0x06, 0xe5, 0x84, - 0x4a, 0x5e, 0xf8, 0xb9, 0x26, 0xfd, 0x5c, 0x3c, 0x8c, 0xf1, 0x70, 0x42, 0x92, 0x5b, 0x32, 0x0d, - 0x9f, 0x74, 0x3c, 0x6a, 0x13, 0x56, 0x59, 0x88, 0x2c, 0xed, 0x2a, 0x2a, 0x8e, 0x49, 0xa0, 0xd7, - 0x20, 0x27, 0x76, 0xbe, 0x52, 0x10, 0x26, 0xca, 0xd2, 0x44, 0x4e, 0x1c, 0x0b, 0x0e, 0x78, 0xe8, - 0x0d, 0x58, 0x90, 0xb7, 0xa6, 0x52, 0x14, 0x62, 0xcb, 0x52, 0x6c, 0x21, 0x0c, 0xeb, 0x90, 0xaf, - 0xff, 0x41, 0x83, 0xe5, 0xd8, 0xf9, 0x89, 0x58, 0xb9, 0x0a, 0x8b, 0x9d, 0xd8, 0x4d, 0x91, 0x67, - 0xa9, 0x56, 0x13, 0xbf, 0x45, 0x38, 0x21, 0x89, 0x08, 0x14, 0xa9, 0x44, 0x0a, 0x33, 0xc2, 0x56, - 0xea, 0x40, 0x0b, 0x7d, 
0x88, 0x2c, 0xc5, 0x88, 0x0c, 0x47, 0xc8, 0xfa, 0x3f, 0x35, 0x11, 0x74, - 0x61, 0x8e, 0x40, 0x9b, 0xb1, 0x3c, 0xa4, 0x89, 0x2d, 0x5c, 0x9c, 0x92, 0x43, 0x2e, 0xb9, 0xbc, - 0x99, 0xff, 0x89, 0xcb, 0x7b, 0xad, 0xf0, 0xeb, 0x0f, 0x6b, 0x73, 0x1f, 0xfc, 0x7d, 0x63, 0x4e, - 0xff, 0x99, 0x06, 0xe5, 0x5d, 0x4a, 0x0c, 0x9f, 0xdc, 0xee, 0xf9, 0x62, 0x05, 0x3a, 0xe4, 0x2d, - 0x3a, 0xc0, 0x7d, 0x57, 0xae, 0x14, 0xf8, 0xa5, 0x6c, 0x09, 0x0a, 0x96, 0x1c, 0xd4, 0x86, 0x35, - 0xdb, 0x35, 0x9d, 0xbe, 0x45, 0xee, 0xb8, 0xb6, 0x6b, 0xfb, 0xb6, 0xe1, 0xd8, 0x3f, 0x52, 0x97, - 0xed, 0x73, 0xd2, 0xbb, 0xb5, 0xfd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, - 0x11, 0x87, 0x44, 0x7e, 0x5c, 0x07, 0xd4, 0xa1, 0x86, 0x49, 0xda, 0x84, 0xda, 0x9e, 0x75, 0x48, - 0x4c, 0xcf, 0xb5, 0x98, 0x08, 0x95, 0x6c, 0xf3, 0xff, 0x46, 0xc3, 0x1a, 0xba, 0x31, 0xc1, 0xc5, - 0x17, 0x68, 0x20, 0x07, 0xca, 0x3d, 0x2a, 0xfe, 0xb7, 0x7d, 0x59, 0x48, 0xf8, 0x05, 0x7e, 0x2b, - 0xdd, 0x19, 0xb4, 0xe3, 0xaa, 0xcd, 0xd5, 0xd1, 0xb0, 0x56, 0x4e, 0x90, 0x70, 0x12, 0x1c, 0x7d, - 0x1b, 0x56, 0x3c, 0xda, 0x3b, 0x31, 0xdc, 0x16, 0xe9, 0x11, 0xd7, 0x22, 0xae, 0xcf, 0x44, 0x52, - 0x29, 0x34, 0xd7, 0x78, 0xfa, 0xbf, 0x3d, 0xc6, 0xc3, 0x13, 0xd2, 0xe8, 0x1e, 0xac, 0xf6, 0xa8, - 0xd7, 0x33, 0x3a, 0x06, 0x47, 0x6c, 0x7b, 0x8e, 0x6d, 0x0e, 0x44, 0xd2, 0x29, 0x36, 0xaf, 0x8c, - 0x86, 0xb5, 0xd5, 0xf6, 0x38, 0xf3, 0x7c, 0x58, 0x7b, 0x49, 0x6c, 0x1d, 0xa7, 0x44, 0x4c, 0x3c, - 0x09, 0x13, 0x3b, 0xdb, 0xdc, 0xb4, 0xb3, 0xd5, 0xf7, 0xa1, 0xd0, 0xea, 0x53, 0xa1, 0x85, 0xbe, - 0x09, 0x05, 0x4b, 0xfe, 0x2f, 0x77, 0xfe, 0xd5, 0xb0, 0x7e, 0x86, 0x32, 0xe7, 0xc3, 0x5a, 0x99, - 0x57, 0xfc, 0x7a, 0x48, 0xc0, 0x4a, 0x45, 0xbf, 0x0f, 0xe5, 0xbd, 0x87, 0x3d, 0x8f, 0xfa, 0xe1, - 0x99, 0xbe, 0x0e, 0x79, 0x22, 0x08, 0x02, 0xad, 0x10, 0x25, 0xfd, 0x40, 0x0c, 0x4b, 0x2e, 0x4f, - 0x42, 0xe4, 0xa1, 0x61, 0xfa, 0x32, 0xa0, 0x54, 0x12, 0xda, 0xe3, 0x44, 0x1c, 0xf0, 0xf4, 0xc7, - 0x1a, 0xc0, 0x0d, 0xa2, 0xb0, 0x77, 0x60, 0x39, 0xbc, 0xc0, 0xc9, 0xbc, 0xf2, 0xff, 0x52, 0x7b, - 0x19, 0x27, 0xd9, 0x78, 0x5c, 0xfe, 0x33, 0x08, 0xeb, 0xfb, 0x50, 0x14, 0xd9, 0x8c, 0x17, 0x92, - 0x28, 0xb5, 0x6a, 0xcf, 0x49, 0xad, 0x61, 0x25, 0xca, 0x4c, 0xab, 0x44, 0xb1, 0xcb, 0xeb, 0x40, - 0x39, 0xd0, 0x0d, 0x8b, 0x63, 0x2a, 0x0b, 0x57, 0xa0, 0x10, 0x2e, 0x5c, 0x5a, 0x51, 0x4d, 0x51, - 0x08, 0x84, 0x95, 0x44, 0xcc, 0xda, 0x09, 0x24, 0x32, 0x73, 0x3a, 0x63, 0xb1, 0x4a, 0x91, 0x79, - 0x7e, 0xa5, 0x88, 0x59, 0xfa, 0x09, 0x54, 0xa6, 0x75, 0x52, 0x2f, 0x50, 0x3b, 0xd2, 0xbb, 0xa2, - 0xff, 0x4a, 0x83, 0x95, 0x38, 0x52, 0xfa, 0xe3, 0x4b, 0x6f, 0xe4, 0xf2, 0x9e, 0x23, 0xb6, 0x23, - 0xbf, 0xd1, 0x60, 0x2d, 0xb1, 0xb4, 0x99, 0x4e, 0x7c, 0x06, 0xa7, 0xe2, 0xc1, 0x91, 0x9d, 0x21, - 0x38, 0x1a, 0x50, 0xda, 0x57, 0x71, 0x4f, 0x2f, 0xef, 0xd2, 0xf4, 0x3f, 0x6b, 0xb0, 0x18, 0xd3, - 0x60, 0xe8, 0x3e, 0x2c, 0xf0, 0x1c, 0x68, 0xbb, 0x1d, 0xd9, 0x41, 0xa6, 0x2c, 0xec, 0x31, 0x90, - 0x68, 0x5d, 0xed, 0x00, 0x09, 0x87, 0x90, 0xa8, 0x0d, 0x79, 0x4a, 0x58, 0xdf, 0xf1, 0x65, 0xfa, - 0xbf, 0x92, 0xb2, 0x04, 0xfb, 0x86, 0xdf, 0x67, 0x41, 0x9e, 0xc4, 0x42, 0x1f, 0x4b, 0x1c, 0xfd, - 0x6f, 0x19, 0x28, 0xdf, 0x34, 0x8e, 0x88, 0x73, 0x48, 0x1c, 0x62, 0xfa, 0x1e, 0x45, 0x3f, 0x86, - 0x52, 0xd7, 0xf0, 0xcd, 0x13, 0x41, 0x0d, 0xfb, 0xe0, 0x56, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x41, - 0x04, 0xb3, 0xe7, 0xfa, 0x74, 0xd0, 0x7c, 0x49, 0x2e, 0xac, 0x14, 0xe3, 0xe0, 0xb8, 0x35, 0xf1, - 0x78, 0x11, 0xdf, 0x7b, 0x0f, 0x7b, 0xbc, 0xe0, 0xcf, 0xfe, 0x66, 0x4a, 0xb8, 0x80, 0xc9, 0xfb, - 0x7d, 0x9b, 0x92, 0x2e, 0x71, 0xfd, 0xe8, 0xf1, 
0x72, 0x30, 0x86, 0x8f, 0x27, 0x2c, 0xae, 0xbf, - 0x03, 0x2b, 0xe3, 0xce, 0xa3, 0x15, 0xc8, 0x9e, 0x92, 0x41, 0x10, 0x0b, 0x98, 0xff, 0x8b, 0xd6, - 0x20, 0x77, 0x66, 0x38, 0x7d, 0x99, 0x7f, 0x70, 0xf0, 0x71, 0x2d, 0x73, 0x55, 0xd3, 0x7f, 0xa7, - 0x41, 0x65, 0x9a, 0x23, 0xe8, 0xf3, 0x31, 0xa0, 0x66, 0x49, 0x7a, 0x95, 0x7d, 0x97, 0x0c, 0x02, - 0xd4, 0x3d, 0x28, 0x78, 0x3d, 0xfe, 0xdc, 0xf4, 0xa8, 0x8c, 0xf3, 0x37, 0xc2, 0xd8, 0xbd, 0x2d, - 0xe9, 0xe7, 0xc3, 0xda, 0xcb, 0x09, 0xf8, 0x90, 0x81, 0x95, 0x2a, 0x2f, 0x92, 0xc2, 0x1f, 0x5e, - 0xb8, 0x55, 0x91, 0xbc, 0x2b, 0x28, 0x58, 0x72, 0xf4, 0x3f, 0x6a, 0x30, 0x2f, 0x5a, 0xd9, 0xfb, - 0x50, 0xe0, 0xfb, 0x67, 0x19, 0xbe, 0x21, 0xfc, 0x4a, 0xfd, 0xf0, 0xe1, 0xda, 0x07, 0xc4, 0x37, - 0xa2, 0xfb, 0x15, 0x52, 0xb0, 0x42, 0x44, 0x18, 0x72, 0xb6, 0x4f, 0xba, 0xe1, 0x41, 0xbe, 0x39, - 0x15, 0x5a, 0x3e, 0xbb, 0xeb, 0xd8, 0x78, 0xb0, 0xf7, 0xd0, 0x27, 0x2e, 0x3f, 0x8c, 0x28, 0x19, - 0xec, 0x73, 0x0c, 0x1c, 0x40, 0xe9, 0xbf, 0xd7, 0x40, 0x99, 0xe2, 0xd7, 0x9d, 0x11, 0xe7, 0xf8, - 0xa6, 0xed, 0x9e, 0xca, 0x6d, 0x55, 0xee, 0x1c, 0x4a, 0x3a, 0x56, 0x12, 0x17, 0x95, 0xd8, 0xcc, - 0x8c, 0x25, 0xf6, 0x0a, 0x14, 0x4c, 0xcf, 0xf5, 0x6d, 0xb7, 0x3f, 0x91, 0x5f, 0x76, 0x25, 0x1d, - 0x2b, 0x09, 0xfd, 0x69, 0x16, 0x4a, 0xdc, 0xd7, 0xb0, 0xc6, 0x7f, 0x1d, 0xca, 0x4e, 0xfc, 0xf4, - 0xa4, 0xcf, 0x2f, 0x4b, 0x88, 0xe4, 0x7d, 0xc4, 0x49, 0x59, 0xae, 0x7c, 0x6c, 0x13, 0xc7, 0x52, - 0xca, 0x99, 0xa4, 0xf2, 0xf5, 0x38, 0x13, 0x27, 0x65, 0x79, 0x9e, 0x7d, 0xc0, 0xe3, 0x5a, 0x36, - 0x73, 0x6a, 0x6b, 0xbf, 0xc3, 0x89, 0x38, 0xe0, 0x5d, 0xb4, 0x3f, 0xf3, 0x33, 0xee, 0xcf, 0x35, - 0x58, 0xe2, 0x07, 0xe9, 0xf5, 0xfd, 0xb0, 0xe3, 0xcd, 0x89, 0xbe, 0x0b, 0x8d, 0x86, 0xb5, 0xa5, - 0xf7, 0x12, 0x1c, 0x3c, 0x26, 0x39, 0xb5, 0x7d, 0xc9, 0x7f, 0xd2, 0xf6, 0x85, 0xaf, 0xda, 0xb1, - 0xbb, 0xb6, 0x5f, 0x59, 0x10, 0x4e, 0xa8, 0x55, 0xdf, 0xe4, 0x44, 0x1c, 0xf0, 0x12, 0x47, 0x5a, - 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2, 0x81, 0x6d, 0x52, 0x8f, 0xaf, 0x85, 0x17, 0x26, 0x96, 0x68, - 0xec, 0x55, 0x02, 0x0f, 0xd7, 0x18, 0xf2, 0xb9, 0x2b, 0xae, 0xe1, 0x7a, 0x41, 0xfb, 0x9e, 0x8b, - 0x5c, 0xb9, 0xc5, 0x89, 0x38, 0xe0, 0x5d, 0x5b, 0xe3, 0xf5, 0xe8, 0x17, 0x8f, 0x6b, 0x73, 0x8f, - 0x1e, 0xd7, 0xe6, 0x3e, 0x7c, 0x2c, 0x6b, 0xd3, 0xbf, 0x00, 0xe0, 0xf6, 0xd1, 0x0f, 0x89, 0x19, - 0xc4, 0xfc, 0xe5, 0x13, 0x04, 0xde, 0x63, 0xc8, 0xc1, 0x95, 0x78, 0x6d, 0x67, 0xc6, 0x7a, 0x8c, - 0x18, 0x0f, 0x27, 0x24, 0x51, 0x03, 0x8a, 0x6a, 0xaa, 0x20, 0xe3, 0x7b, 0x55, 0xaa, 0x15, 0xd5, - 0xe8, 0x01, 0x47, 0x32, 0x89, 0x0b, 0x38, 0x7f, 0xe9, 0x05, 0x6c, 0x42, 0xb6, 0x6f, 0x5b, 0x22, - 0x24, 0x8a, 0xcd, 0x2f, 0x87, 0x09, 0xf0, 0xce, 0x7e, 0xeb, 0x7c, 0x58, 0x7b, 0x75, 0xda, 0x48, - 0xce, 0x1f, 0xf4, 0x08, 0xab, 0xdf, 0xd9, 0x6f, 0x61, 0xae, 0x7c, 0x51, 0x90, 0xe6, 0x67, 0x0c, - 0xd2, 0x6d, 0x00, 0xb9, 0x6a, 0xae, 0x1d, 0xc4, 0x86, 0x9a, 0xb0, 0xdc, 0x50, 0x1c, 0x1c, 0x93, - 0x42, 0x0c, 0x56, 0x4d, 0xfe, 0xce, 0xb4, 0x3d, 0x97, 0x1f, 0x3d, 0xf3, 0x8d, 0x6e, 0x30, 0x63, - 0x28, 0x6d, 0x7f, 0x31, 0x5d, 0xc6, 0xe4, 0x6a, 0xcd, 0x57, 0xa4, 0x99, 0xd5, 0xdd, 0x71, 0x30, - 0x3c, 0x89, 0x8f, 0x3c, 0x58, 0xb5, 0xe4, 0xcb, 0x28, 0x32, 0x5a, 0x9c, 0xd9, 0xe8, 0xcb, 0xdc, - 0x60, 0x6b, 0x1c, 0x08, 0x4f, 0x62, 0xa3, 0xef, 0xc3, 0x7a, 0x48, 0x9c, 0x7c, 0x9e, 0x56, 0x40, - 0xec, 0x54, 0x95, 0x3f, 0xdc, 0x5b, 0x53, 0xa5, 0xf0, 0x73, 0x10, 0x90, 0x05, 0x79, 0x27, 0xe8, - 0x2e, 0x4a, 0xa2, 0x22, 0x7c, 0x23, 0xdd, 0x2a, 0xa2, 0xe8, 0xaf, 0xc7, 0xbb, 0x0a, 0xf5, 0xfc, - 0x92, 0x0d, 0x85, 0xc4, 0x46, 0x0f, 0xa1, 0x64, 0xb8, 0xae, 0xe7, 0x1b, 
0xc1, 0x83, 0x79, 0x51, - 0x98, 0xda, 0x99, 0xd9, 0xd4, 0x4e, 0x84, 0x31, 0xd6, 0xc5, 0xc4, 0x38, 0x38, 0x6e, 0x0a, 0x3d, - 0x80, 0x65, 0xef, 0x81, 0x4b, 0x28, 0x26, 0xc7, 0x84, 0x12, 0xd7, 0x24, 0xac, 0x52, 0x16, 0xd6, - 0xbf, 0x92, 0xd2, 0x7a, 0x42, 0x39, 0x0a, 0xe9, 0x24, 0x9d, 0xe1, 0x71, 0x2b, 0xa8, 0x0e, 0x70, - 0x6c, 0xbb, 0xb2, 0x17, 0xad, 0x2c, 0x45, 0x63, 0xb2, 0xeb, 0x8a, 0x8a, 0x63, 0x12, 0xe8, 0xab, - 0x50, 0x32, 0x9d, 0x3e, 0xf3, 0x49, 0x30, 0x8f, 0x5b, 0x16, 0x37, 0x48, 0xad, 0x6f, 0x37, 0x62, - 0xe1, 0xb8, 0x1c, 0x3a, 0x81, 0x45, 0x3b, 0xd6, 0xf4, 0x56, 0x56, 0x44, 0x2c, 0x6e, 0xcf, 0xdc, - 0xe9, 0xb2, 0xe6, 0x0a, 0xcf, 0x44, 0x71, 0x0a, 0x4e, 0x20, 0xaf, 0x7f, 0x0d, 0x4a, 0x9f, 0xb0, - 0x07, 0xe3, 0x3d, 0xdc, 0xf8, 0xd1, 0xcd, 0xd4, 0xc3, 0xfd, 0x25, 0x03, 0x4b, 0xc9, 0x0d, 0x57, - 0x6f, 0x1d, 0x6d, 0xea, 0x7c, 0x35, 0xcc, 0xca, 0xd9, 0xa9, 0x59, 0x59, 0x26, 0xbf, 0xf9, 0x17, - 0x49, 0x7e, 0xdb, 0x00, 0x46, 0xcf, 0x0e, 0xf3, 0x5e, 0x90, 0x47, 0x55, 0xe6, 0x8a, 0x26, 0x7e, - 0x38, 0x26, 0x25, 0x26, 0xa8, 0x9e, 0xeb, 0x53, 0xcf, 0x71, 0x08, 0x95, 0xc5, 0x34, 0x98, 0xa0, - 0x2a, 0x2a, 0x8e, 0x49, 0xa0, 0xeb, 0x80, 0x8e, 0x1c, 0xcf, 0x3c, 0x15, 0x5b, 0x10, 0xde, 0x73, - 0x91, 0x25, 0x0b, 0xc1, 0xe0, 0xaa, 0x39, 0xc1, 0xc5, 0x17, 0x68, 0xe8, 0x0b, 0x90, 0x6b, 0xf3, - 0xb6, 0x42, 0xbf, 0x0d, 0xc9, 0x99, 0x13, 0x7a, 0x27, 0xd8, 0x09, 0x4d, 0x0d, 0x85, 0x66, 0xdb, - 0x05, 0xfd, 0x0a, 0x14, 0xb1, 0xe7, 0xf9, 0x6d, 0xc3, 0x3f, 0x61, 0xa8, 0x06, 0xb9, 0x1e, 0xff, - 0x47, 0x8e, 0xfb, 0xc4, 0xac, 0x5a, 0x70, 0x70, 0x40, 0xd7, 0x7f, 0xa9, 0xc1, 0x2b, 0x53, 0xe7, - 0x8c, 0x7c, 0x47, 0x4d, 0xf5, 0x25, 0x5d, 0x52, 0x3b, 0x1a, 0xc9, 0xe1, 0x98, 0x14, 0xef, 0xc4, - 0x12, 0xc3, 0xc9, 0xf1, 0x4e, 0x2c, 0x61, 0x0d, 0x27, 0x65, 0xf5, 0x7f, 0x67, 0x20, 0x1f, 0x3c, - 0xcb, 0x3e, 0xe3, 0xe6, 0xfb, 0x75, 0xc8, 0x33, 0x61, 0x47, 0xba, 0xa7, 0xb2, 0x65, 0x60, 0x1d, - 0x4b, 0x2e, 0x6f, 0x62, 0xba, 0x84, 0x31, 0xa3, 0x13, 0x06, 0xaf, 0x6a, 0x62, 0x0e, 0x02, 0x32, - 0x0e, 0xf9, 0xe8, 0x6d, 0xfe, 0x0a, 0x35, 0x98, 0xea, 0x0b, 0xab, 0x21, 0x24, 0x16, 0xd4, 0xf3, - 0x61, 0x6d, 0x51, 0x82, 0x8b, 0x6f, 0x2c, 0xa5, 0xd1, 0x3d, 0x58, 0xb0, 0x88, 0x6f, 0xd8, 0x4e, - 0xd0, 0x0e, 0xa6, 0x9e, 0x5e, 0x06, 0x60, 0xad, 0x40, 0xb5, 0x59, 0xe2, 0x3e, 0xc9, 0x0f, 0x1c, - 0x02, 0xf2, 0x8b, 0x67, 0x7a, 0x56, 0xf0, 0x93, 0x42, 0x2e, 0xba, 0x78, 0xbb, 0x9e, 0x45, 0xb0, - 0xe0, 0xe8, 0x8f, 0x34, 0x28, 0x05, 0x48, 0xbb, 0x46, 0x9f, 0x11, 0xb4, 0xa5, 0x56, 0x11, 0x1c, - 0x77, 0x58, 0x93, 0xe7, 0xdf, 0x1b, 0xf4, 0xc8, 0xf9, 0xb0, 0x56, 0x14, 0x62, 0xfc, 0x43, 0x2d, - 0x20, 0xb6, 0x47, 0x99, 0x4b, 0xf6, 0xe8, 0x35, 0xc8, 0x89, 0xd6, 0x5b, 0x6e, 0xa6, 0x6a, 0xf4, - 0x44, 0x7b, 0x8e, 0x03, 0x9e, 0xfe, 0x71, 0x06, 0xca, 0x89, 0xc5, 0xa5, 0xe8, 0xea, 0xd4, 0xa8, - 0x24, 0x93, 0x62, 0xfc, 0x36, 0xfd, 0x87, 0xa0, 0xef, 0x42, 0xde, 0xe4, 0xeb, 0x0b, 0x7f, 0x89, - 0xdb, 0x9a, 0xe5, 0x28, 0xc4, 0xce, 0x44, 0x91, 0x24, 0x3e, 0x19, 0x96, 0x80, 0xe8, 0x06, 0xac, - 0x52, 0xe2, 0xd3, 0xc1, 0xce, 0xb1, 0x4f, 0x68, 0xbc, 0xff, 0xcf, 0x45, 0x7d, 0x0f, 0x1e, 0x17, - 0xc0, 0x93, 0x3a, 0x61, 0xaa, 0xcc, 0xbf, 0x40, 0xaa, 0xd4, 0x1d, 0x98, 0xff, 0x2f, 0xf6, 0xe8, - 0xdf, 0x83, 0x62, 0xd4, 0x45, 0x7d, 0xca, 0x26, 0xf5, 0x1f, 0x40, 0x81, 0x47, 0x63, 0xd8, 0xfd, - 0x5f, 0x52, 0x89, 0x92, 0x35, 0x22, 0x93, 0xa6, 0x46, 0xe8, 0x6f, 0x41, 0xf9, 0x4e, 0xcf, 0x9a, - 0xed, 0x57, 0x14, 0x7d, 0x1b, 0x82, 0x1f, 0x05, 0x79, 0x0a, 0x0e, 0x9e, 0xf9, 0xb1, 0x14, 0x1c, - 0x7f, 0xb3, 0x27, 0x7f, 0xaf, 0x01, 0xf1, 0xe6, 0xdc, 0x3b, 0x23, 0xae, 0xcf, 0x57, 0xc3, 0x8f, - 
0x6d, 0x7c, 0x35, 0xe2, 0xee, 0x09, 0x0e, 0xba, 0x03, 0x79, 0x4f, 0xb4, 0x64, 0x72, 0xf0, 0x35, - 0xe3, 0x0c, 0x41, 0x85, 0x6a, 0xd0, 0xd7, 0x61, 0x09, 0xd6, 0xdc, 0x7c, 0xf2, 0xac, 0x3a, 0xf7, - 0xf4, 0x59, 0x75, 0xee, 0xa3, 0x67, 0xd5, 0xb9, 0x0f, 0x46, 0x55, 0xed, 0xc9, 0xa8, 0xaa, 0x3d, - 0x1d, 0x55, 0xb5, 0x8f, 0x46, 0x55, 0xed, 0xe3, 0x51, 0x55, 0x7b, 0xf4, 0x8f, 0xea, 0xdc, 0xbd, - 0xcc, 0xd9, 0xd6, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xab, 0xec, 0x02, 0x4a, 0x00, 0x21, 0x00, - 0x00, + // 2674 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0x4d, 0x6c, 0x23, 0x57, + 0x39, 0x63, 0xc7, 0x8e, 0xfd, 0x39, 0xce, 0xcf, 0xeb, 0x16, 0x5c, 0x4b, 0xc4, 0xe9, 0x14, 0x55, + 0x29, 0x6c, 0x6d, 0x92, 0xd2, 0x6a, 0x59, 0x68, 0x21, 0x8e, 0x93, 0x6d, 0xe8, 0xa6, 0x89, 0x5e, + 0xba, 0x8b, 0x58, 0x56, 0x88, 0x89, 0xe7, 0xc5, 0x19, 0x62, 0xcf, 0x4c, 0xdf, 0x1b, 0x67, 0x37, + 0x70, 0xa0, 0x07, 0x10, 0x20, 0x41, 0xb5, 0x47, 0x4e, 0xa8, 0x2b, 0xb8, 0x70, 0xe5, 0xc4, 0x89, + 0x53, 0x25, 0xf6, 0x58, 0x89, 0x4b, 0x0f, 0xc8, 0xea, 0x06, 0x24, 0xb8, 0x71, 0xcf, 0x01, 0xa1, + 0xf7, 0x33, 0x33, 0x6f, 0xec, 0x78, 0x33, 0x66, 0x0b, 0xe2, 0x14, 0xcf, 0xf7, 0xff, 0xbe, 0xef, + 0x7b, 0xdf, 0xf7, 0xbd, 0x2f, 0xb0, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0xaf, 0x71, 0xdc, 0x3f, 0x20, + 0xd4, 0x25, 0x01, 0x61, 0x8d, 0x13, 0xe2, 0xda, 0x1e, 0x6d, 0x28, 0x84, 0xe5, 0x3b, 0x3d, 0xab, + 0x7d, 0xe4, 0xb8, 0x84, 0x9e, 0x36, 0xfc, 0xe3, 0x0e, 0x07, 0xb0, 0x46, 0x8f, 0x04, 0x56, 0xe3, + 0x64, 0xb5, 0xd1, 0x21, 0x2e, 0xa1, 0x56, 0x40, 0xec, 0xba, 0x4f, 0xbd, 0xc0, 0x43, 0x9f, 0x97, + 0x5c, 0x75, 0x9d, 0xab, 0xee, 0x1f, 0x77, 0x38, 0x80, 0xd5, 0x39, 0x57, 0xfd, 0x64, 0xb5, 0xfa, + 0x72, 0xc7, 0x09, 0x8e, 0xfa, 0x07, 0xf5, 0xb6, 0xd7, 0x6b, 0x74, 0xbc, 0x8e, 0xd7, 0x10, 0xcc, + 0x07, 0xfd, 0x43, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, 0x29, 0xb4, 0x3a, 0xd6, 0x14, 0xda, 0x77, 0x03, + 0xa7, 0x47, 0x86, 0xad, 0xa8, 0xbe, 0x76, 0x19, 0x03, 0x6b, 0x1f, 0x91, 0x9e, 0x35, 0xcc, 0x67, + 0xfe, 0x29, 0x0b, 0x85, 0xf5, 0xbd, 0xed, 0x1b, 0xd4, 0xeb, 0xfb, 0x68, 0x19, 0xa6, 0x5d, 0xab, + 0x47, 0x2a, 0xc6, 0xb2, 0xb1, 0x52, 0x6c, 0xce, 0x3e, 0x1a, 0xd4, 0xa6, 0xce, 0x06, 0xb5, 0xe9, + 0xb7, 0xad, 0x1e, 0xc1, 0x02, 0x83, 0xba, 0x50, 0x38, 0x21, 0x94, 0x39, 0x9e, 0xcb, 0x2a, 0x99, + 0xe5, 0xec, 0x4a, 0x69, 0xed, 0x8d, 0x7a, 0x9a, 0xf3, 0xd7, 0x85, 0x82, 0xdb, 0x92, 0x75, 0xcb, + 0xa3, 0x2d, 0x87, 0xb5, 0xbd, 0x13, 0x42, 0x4f, 0x9b, 0x0b, 0x4a, 0x4b, 0x41, 0x21, 0x19, 0x8e, + 0x34, 0xa0, 0x1f, 0x1b, 0xb0, 0xe0, 0x53, 0x72, 0x48, 0x28, 0x25, 0xb6, 0xc2, 0x57, 0xb2, 0xcb, + 0xc6, 0xa7, 0xa0, 0xb6, 0xa2, 0xd4, 0x2e, 0xec, 0x0d, 0xc9, 0xc7, 0x23, 0x1a, 0xd1, 0x6f, 0x0c, + 0xa8, 0x32, 0x42, 0x4f, 0x08, 0x5d, 0xb7, 0x6d, 0x4a, 0x18, 0x6b, 0x9e, 0x6e, 0x74, 0x1d, 0xe2, + 0x06, 0x1b, 0xdb, 0x2d, 0xcc, 0x2a, 0xd3, 0xc2, 0x0f, 0x5f, 0x4f, 0x67, 0xd0, 0xfe, 0x38, 0x39, + 0x4d, 0x53, 0x59, 0x54, 0x1d, 0x4b, 0xc2, 0xf0, 0x13, 0xcc, 0x30, 0x0f, 0x61, 0x36, 0x0c, 0xe4, + 0x4d, 0x87, 0x05, 0xe8, 0x36, 0xe4, 0x3b, 0xfc, 0x83, 0x55, 0x0c, 0x61, 0x60, 0x3d, 0x9d, 0x81, + 0xa1, 0x8c, 0xe6, 0x9c, 0xb2, 0x27, 0x2f, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0x4f, 0x43, 0x69, + 0x7d, 0x6f, 0x1b, 0x13, 0xe6, 0xf5, 0x69, 0x9b, 0xa4, 0x48, 0x9a, 0x35, 0x00, 0xfe, 0x97, 0xf9, + 0x56, 0x9b, 0xd8, 0x95, 0xcc, 0xb2, 0xb1, 0x52, 0x68, 0x22, 0x45, 0x07, 0x6f, 0x47, 0x18, 0xac, + 0x51, 0x71, 0xa9, 0xc7, 0x8e, 0x6b, 0x8b, 0x68, 0x6b, 0x52, 0xdf, 0x72, 0x5c, 0x1b, 0x0b, 0x0c, + 0xba, 0x09, 0xb9, 0x13, 0x42, 0x0f, 0xb8, 0xff, 0x79, 0x42, 0x7c, 
0x31, 0xdd, 0xf1, 0x6e, 0x73, + 0x96, 0x66, 0xf1, 0x6c, 0x50, 0xcb, 0x89, 0x9f, 0x58, 0x0a, 0x41, 0x75, 0x00, 0x76, 0xe4, 0xd1, + 0x40, 0x98, 0x53, 0xc9, 0x2d, 0x67, 0x57, 0x8a, 0xcd, 0x39, 0x6e, 0xdf, 0x7e, 0x04, 0xc5, 0x1a, + 0x05, 0xba, 0x06, 0xb3, 0xcc, 0x71, 0x3b, 0xfd, 0xae, 0x45, 0x39, 0xa0, 0x92, 0x17, 0x76, 0x5e, + 0x51, 0x76, 0xce, 0xee, 0x6b, 0x38, 0x9c, 0xa0, 0xe4, 0x9a, 0xda, 0x56, 0x40, 0x3a, 0x1e, 0x75, + 0x08, 0xab, 0xcc, 0xc4, 0x9a, 0x36, 0x22, 0x28, 0xd6, 0x28, 0xd0, 0x0b, 0x90, 0x13, 0x9e, 0xaf, + 0x14, 0x84, 0x8a, 0xb2, 0x52, 0x91, 0x13, 0x61, 0xc1, 0x12, 0x87, 0x5e, 0x82, 0x19, 0x75, 0x6b, + 0x2a, 0x45, 0x41, 0x36, 0xaf, 0xc8, 0x66, 0xc2, 0xb4, 0x0e, 0xf1, 0xe8, 0x9b, 0x80, 0x58, 0xe0, + 0x51, 0xab, 0x43, 0x14, 0xea, 0x4d, 0x8b, 0x1d, 0x55, 0x40, 0x70, 0x55, 0x15, 0x17, 0xda, 0x1f, + 0xa1, 0xc0, 0x17, 0x70, 0x99, 0xbf, 0x37, 0x60, 0x5e, 0xcb, 0x05, 0x91, 0x77, 0xd7, 0x60, 0xb6, + 0xa3, 0xdd, 0x3a, 0x95, 0x17, 0x91, 0x67, 0xf4, 0x1b, 0x89, 0x13, 0x94, 0x88, 0x40, 0x91, 0x2a, + 0x49, 0x61, 0x75, 0x59, 0x4d, 0x9d, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, 0x0d, 0xc8, 0x70, 0x2c, 0xd9, + 0xfc, 0xbb, 0x21, 0x12, 0x38, 0xac, 0x37, 0x68, 0x45, 0xab, 0x69, 0x86, 0x08, 0xc7, 0xec, 0x98, + 0x7a, 0x74, 0x49, 0x21, 0xc8, 0xfc, 0x5f, 0x14, 0x82, 0xeb, 0x85, 0x5f, 0x7d, 0x50, 0x9b, 0x7a, + 0xef, 0x2f, 0xcb, 0x53, 0x66, 0x0f, 0xca, 0x1b, 0x94, 0x58, 0x01, 0xd9, 0xf5, 0x03, 0x71, 0x00, + 0x13, 0xf2, 0x36, 0x3d, 0xc5, 0x7d, 0x57, 0x1d, 0x14, 0xf8, 0xfd, 0x6e, 0x09, 0x08, 0x56, 0x18, + 0x1e, 0xbf, 0x43, 0x87, 0x74, 0xed, 0x1d, 0xcb, 0xb5, 0x3a, 0x84, 0xaa, 0x1b, 0x18, 0x79, 0x75, + 0x4b, 0xc3, 0xe1, 0x04, 0xa5, 0xf9, 0xd3, 0x2c, 0x94, 0x5b, 0xa4, 0x4b, 0x62, 0x7d, 0x5b, 0x80, + 0x3a, 0xd4, 0x6a, 0x93, 0x3d, 0x42, 0x1d, 0xcf, 0xde, 0x27, 0x6d, 0xcf, 0xb5, 0x99, 0xc8, 0x88, + 0x6c, 0xf3, 0x33, 0x3c, 0xcf, 0x6e, 0x8c, 0x60, 0xf1, 0x05, 0x1c, 0xa8, 0x0b, 0x65, 0x9f, 0x8a, + 0xdf, 0x4e, 0xa0, 0x7a, 0x0f, 0xbf, 0xf3, 0xaf, 0xa4, 0x73, 0xf5, 0x9e, 0xce, 0xda, 0x5c, 0x3c, + 0x1b, 0xd4, 0xca, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x8f, 0xfa, 0x47, 0x96, 0xdb, + 0x22, 0x3e, 0x71, 0x6d, 0xe2, 0x06, 0x4c, 0x78, 0xa1, 0xd0, 0xbc, 0xc2, 0x3b, 0xc6, 0xee, 0x10, + 0x0e, 0x8f, 0x50, 0xa3, 0x3b, 0xb0, 0xe8, 0x53, 0xcf, 0xb7, 0x3a, 0x16, 0x97, 0xb8, 0xe7, 0x75, + 0x9d, 0xf6, 0xa9, 0xa8, 0x53, 0xc5, 0xe6, 0xd5, 0xb3, 0x41, 0x6d, 0x71, 0x6f, 0x18, 0x79, 0x3e, + 0xa8, 0x3d, 0x23, 0x5c, 0xc7, 0x21, 0x31, 0x12, 0x8f, 0x8a, 0xd1, 0x62, 0x98, 0x1b, 0x17, 0x43, + 0x73, 0x1b, 0x0a, 0xad, 0x3e, 0x15, 0x5c, 0xe8, 0x75, 0x28, 0xd8, 0xea, 0xb7, 0xf2, 0xfc, 0xf3, + 0x61, 0xcb, 0x0d, 0x69, 0xce, 0x07, 0xb5, 0x32, 0x1f, 0x12, 0xea, 0x21, 0x00, 0x47, 0x2c, 0xe6, + 0x5d, 0x28, 0x6f, 0xde, 0xf7, 0x3d, 0x1a, 0x84, 0x31, 0x7d, 0x11, 0xf2, 0x44, 0x00, 0x84, 0xb4, + 0x42, 0xdc, 0x27, 0x24, 0x19, 0x56, 0x58, 0x5e, 0xb7, 0xc8, 0x7d, 0xab, 0x1d, 0xa8, 0x82, 0x1f, + 0xd5, 0xad, 0x4d, 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x68, 0x40, 0x5e, 0x64, 0x14, 0x43, 0xef, 0x40, + 0xb6, 0x67, 0xf9, 0xaa, 0x59, 0xbd, 0x9a, 0x2e, 0xb2, 0x92, 0xb5, 0xbe, 0x63, 0xf9, 0x9b, 0x6e, + 0x40, 0x4f, 0x9b, 0x25, 0xa5, 0x24, 0xbb, 0x63, 0xf9, 0x98, 0x8b, 0xab, 0xda, 0x50, 0x08, 0xb1, + 0x68, 0x01, 0xb2, 0xc7, 0xe4, 0x54, 0x16, 0x24, 0xcc, 0x7f, 0xa2, 0x26, 0xe4, 0x4e, 0xac, 0x6e, + 0x9f, 0xa8, 0x7c, 0xba, 0x3a, 0x89, 0x56, 0x2c, 0x59, 0xaf, 0x67, 0xae, 0x19, 0xe6, 0x2e, 0xc0, + 0x0d, 0x12, 0x79, 0x68, 0x1d, 0xe6, 0xc3, 0x6a, 0x93, 0x2c, 0x82, 0x9f, 0x55, 0xe6, 0xcd, 0xe3, + 0x24, 0x1a, 0x0f, 0xd3, 0x9b, 0x77, 0xa1, 0x28, 0x0a, 0x25, 0xef, 0x77, 0x71, 0x07, 0x30, 
0x9e, + 0xd0, 0x01, 0xc2, 0x86, 0x99, 0x19, 0xd7, 0x30, 0xb5, 0xba, 0xd0, 0x85, 0xb2, 0xe4, 0x0d, 0x7b, + 0x78, 0x2a, 0x0d, 0x57, 0xa1, 0x10, 0x9a, 0xa9, 0xb4, 0x44, 0xb3, 0x5b, 0x28, 0x08, 0x47, 0x14, + 0x9a, 0xb6, 0x23, 0x48, 0x14, 0xfd, 0x74, 0xca, 0xb4, 0x86, 0x96, 0x79, 0x72, 0x43, 0xd3, 0x34, + 0xfd, 0x08, 0x2a, 0xe3, 0x06, 0xbe, 0xa7, 0x68, 0x4b, 0xe9, 0x4d, 0x31, 0xdf, 0x37, 0x60, 0x41, + 0x97, 0x94, 0x3e, 0x7c, 0xe9, 0x95, 0x5c, 0x3e, 0x1a, 0x69, 0x1e, 0xf9, 0xb5, 0x01, 0x57, 0x12, + 0x47, 0x9b, 0x28, 0xe2, 0x13, 0x18, 0xa5, 0x27, 0x47, 0x76, 0x82, 0xe4, 0x68, 0x40, 0x69, 0xdb, + 0x75, 0x02, 0xc7, 0xea, 0x3a, 0x3f, 0x20, 0xf4, 0xf2, 0x61, 0xd2, 0xfc, 0xa3, 0x01, 0xb3, 0x1a, + 0x07, 0x43, 0x77, 0x61, 0x86, 0xd7, 0x5d, 0xc7, 0xed, 0xa8, 0xda, 0x91, 0x72, 0x66, 0xd0, 0x84, + 0xc4, 0xe7, 0xda, 0x93, 0x92, 0x70, 0x28, 0x12, 0xed, 0x41, 0x9e, 0x12, 0xd6, 0xef, 0x06, 0x93, + 0x95, 0x88, 0xfd, 0xc0, 0x0a, 0xfa, 0x4c, 0xd6, 0x66, 0x2c, 0xf8, 0xb1, 0x92, 0x63, 0xfe, 0x39, + 0x03, 0xe5, 0x9b, 0xd6, 0x01, 0xe9, 0xee, 0x93, 0x2e, 0x69, 0x07, 0x1e, 0x45, 0x3f, 0x84, 0x52, + 0xcf, 0x0a, 0xda, 0x47, 0x02, 0x1a, 0x8e, 0xeb, 0xad, 0x74, 0x8a, 0x12, 0x92, 0xea, 0x3b, 0xb1, + 0x18, 0x59, 0x10, 0x9f, 0x51, 0x07, 0x2b, 0x69, 0x18, 0xac, 0x6b, 0x13, 0x6f, 0x2c, 0xf1, 0xbd, + 0x79, 0xdf, 0xe7, 0xb3, 0xc4, 0xe4, 0x4f, 0xbb, 0x84, 0x09, 0x98, 0xbc, 0xdb, 0x77, 0x28, 0xe9, + 0x11, 0x37, 0x88, 0xdf, 0x58, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0xea, 0x1b, 0xb0, 0x30, 0x6c, + 0xfc, 0x05, 0xf5, 0xfa, 0x8a, 0x5e, 0xaf, 0x8b, 0x7a, 0x05, 0xfe, 0xad, 0x01, 0x95, 0x71, 0x86, + 0xa0, 0xcf, 0x69, 0x82, 0xe2, 0x1e, 0xf1, 0x16, 0x39, 0x95, 0x52, 0x37, 0xa1, 0xe0, 0xf9, 0xfc, + 0x55, 0xec, 0x51, 0x95, 0xe7, 0x2f, 0x85, 0xb9, 0xbb, 0xab, 0xe0, 0xe7, 0x83, 0xda, 0xb3, 0x09, + 0xf1, 0x21, 0x02, 0x47, 0xac, 0xbc, 0x31, 0x0b, 0x7b, 0xf8, 0xb0, 0x10, 0x35, 0xe6, 0xdb, 0x02, + 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0xd3, 0x62, 0x4a, 0xbe, 0x0b, 0x05, 0xee, 0x3f, 0xdb, 0x0a, + 0x2c, 0x61, 0x57, 0xea, 0xf7, 0x19, 0xe7, 0xde, 0x21, 0x81, 0x15, 0xdf, 0xaf, 0x10, 0x82, 0x23, + 0x89, 0x08, 0x43, 0xce, 0x09, 0x48, 0x2f, 0x0c, 0xe4, 0xcb, 0x63, 0x45, 0xab, 0xed, 0x40, 0x1d, + 0x5b, 0xf7, 0x36, 0xef, 0x07, 0xc4, 0xe5, 0xc1, 0x88, 0x8b, 0xc1, 0x36, 0x97, 0x81, 0xa5, 0x28, + 0xf3, 0x77, 0x06, 0x44, 0xaa, 0xf8, 0x75, 0x67, 0xa4, 0x7b, 0x78, 0xd3, 0x71, 0x8f, 0x95, 0x5b, + 0x23, 0x73, 0xf6, 0x15, 0x1c, 0x47, 0x14, 0x17, 0x35, 0xc4, 0xcc, 0x64, 0x0d, 0x91, 0x2b, 0x6c, + 0x7b, 0x6e, 0xe0, 0xb8, 0xfd, 0x91, 0xfa, 0xb2, 0xa1, 0xe0, 0x38, 0xa2, 0x30, 0xff, 0x95, 0x81, + 0x12, 0xb7, 0x35, 0xec, 0xc8, 0x5f, 0x85, 0x72, 0x57, 0x8f, 0x9e, 0xb2, 0xf9, 0x59, 0x25, 0x22, + 0x79, 0x1f, 0x71, 0x92, 0x96, 0x33, 0x8b, 0x31, 0x37, 0x62, 0xce, 0x24, 0x99, 0xb7, 0x74, 0x24, + 0x4e, 0xd2, 0xf2, 0x3a, 0x7b, 0x8f, 0xe7, 0xb5, 0x1a, 0x20, 0x23, 0xd7, 0x7e, 0x8b, 0x03, 0xb1, + 0xc4, 0x5d, 0xe4, 0x9f, 0xe9, 0x09, 0xfd, 0x73, 0x1d, 0xe6, 0x78, 0x20, 0xbd, 0x7e, 0x10, 0x4e, + 0xd9, 0x39, 0x31, 0xeb, 0xa1, 0xb3, 0x41, 0x6d, 0xee, 0x9d, 0x04, 0x06, 0x0f, 0x51, 0x72, 0x1b, + 0xbb, 0x4e, 0xcf, 0x09, 0x2a, 0x33, 0x82, 0x25, 0xb2, 0xf1, 0x26, 0x07, 0x62, 0x89, 0x4b, 0x04, + 0xa0, 0x70, 0x69, 0x00, 0xfe, 0x91, 0x01, 0x24, 0x9f, 0x05, 0xb6, 0x9c, 0x96, 0xe4, 0x8d, 0x7e, + 0x09, 0x66, 0x7a, 0xea, 0x59, 0x61, 0x24, 0x1b, 0x4a, 0xf8, 0xa2, 0x08, 0xf1, 0x68, 0x07, 0x8a, + 0xf2, 0x66, 0xc5, 0xd9, 0xd2, 0x50, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x3e, 0xa8, 0x55, 0x13, 0x6a, + 0x22, 0xcc, 0x3b, 0xa7, 0x3e, 0xc1, 0xb1, 0x04, 0xb4, 0x06, 0x60, 0xf9, 0x8e, 0xbe, 0x43, 0x2a, + 0xc6, 0x3b, 0x88, 
0xf8, 0x35, 0x88, 0x35, 0x2a, 0xf4, 0x26, 0x4c, 0x73, 0x4f, 0xa9, 0x05, 0xc3, + 0x17, 0xd2, 0xdd, 0x4f, 0xee, 0xeb, 0x66, 0x81, 0x37, 0x2d, 0xfe, 0x0b, 0x0b, 0x09, 0xe8, 0x0e, + 0xe4, 0x45, 0x5a, 0xc8, 0xa8, 0x4c, 0x38, 0x68, 0x8a, 0x57, 0x87, 0x9a, 0x92, 0xcf, 0xa3, 0x5f, + 0x58, 0x49, 0x34, 0xdf, 0x85, 0xe2, 0x8e, 0xd3, 0xa6, 0x1e, 0x57, 0xc7, 0x1d, 0xcc, 0x12, 0xaf, + 0xac, 0xc8, 0xc1, 0x61, 0xf0, 0x43, 0x3c, 0x8f, 0xba, 0x6b, 0xb9, 0x9e, 0x7c, 0x4b, 0xe5, 0xe2, + 0xa8, 0xbf, 0xcd, 0x81, 0x58, 0xe2, 0xae, 0x5f, 0xe1, 0x8d, 0xfa, 0x67, 0x0f, 0x6b, 0x53, 0x0f, + 0x1e, 0xd6, 0xa6, 0x3e, 0x78, 0xa8, 0x9a, 0xf6, 0xdf, 0x4a, 0x00, 0xbb, 0x07, 0xdf, 0x27, 0x6d, + 0x59, 0x0c, 0x2e, 0xdf, 0x00, 0xf1, 0xe1, 0x4b, 0x2d, 0x1e, 0xc5, 0xb6, 0x24, 0x33, 0x34, 0x7c, + 0x69, 0x38, 0x9c, 0xa0, 0x44, 0x0d, 0x28, 0x46, 0x5b, 0x21, 0x15, 0xb6, 0xc5, 0x30, 0x0d, 0xa2, + 0xd5, 0x11, 0x8e, 0x69, 0x12, 0x95, 0x69, 0xfa, 0xd2, 0xca, 0xd4, 0x84, 0x6c, 0xdf, 0xb1, 0x45, + 0x54, 0x8a, 0xcd, 0x2f, 0x85, 0x9d, 0xe1, 0xd6, 0x76, 0xeb, 0x7c, 0x50, 0x7b, 0x7e, 0xdc, 0x4a, + 0x35, 0x38, 0xf5, 0x09, 0xab, 0xdf, 0xda, 0x6e, 0x61, 0xce, 0x7c, 0xd1, 0xed, 0xcd, 0x4f, 0x78, + 0x7b, 0xd7, 0x00, 0xd4, 0xa9, 0x39, 0xb7, 0xbc, 0x86, 0x51, 0x76, 0xde, 0x88, 0x30, 0x58, 0xa3, + 0x42, 0x0c, 0x16, 0xdb, 0xfc, 0x71, 0xcf, 0x93, 0xdd, 0xe9, 0x11, 0x16, 0x58, 0x3d, 0xb9, 0x23, + 0x9a, 0x2c, 0x55, 0x9f, 0x53, 0x6a, 0x16, 0x37, 0x86, 0x85, 0xe1, 0x51, 0xf9, 0xc8, 0x83, 0x45, + 0x5b, 0x3d, 0x53, 0x63, 0xa5, 0xc5, 0x89, 0x95, 0x3e, 0xcb, 0x15, 0xb6, 0x86, 0x05, 0xe1, 0x51, + 0xd9, 0xe8, 0xbb, 0x50, 0x0d, 0x81, 0xa3, 0xbb, 0x02, 0xb1, 0xb5, 0xca, 0x36, 0x97, 0xce, 0x06, + 0xb5, 0x6a, 0x6b, 0x2c, 0x15, 0x7e, 0x82, 0x04, 0x64, 0x43, 0xbe, 0x2b, 0xc7, 0xae, 0x92, 0x68, + 0x95, 0x5f, 0x4b, 0x77, 0x8a, 0x38, 0xfb, 0xeb, 0xfa, 0xb8, 0x15, 0xbd, 0x85, 0xd5, 0xa4, 0xa5, + 0x64, 0xa3, 0xfb, 0x50, 0xb2, 0x5c, 0xd7, 0x0b, 0x2c, 0xb9, 0xbd, 0x98, 0x15, 0xaa, 0xd6, 0x27, + 0x56, 0xb5, 0x1e, 0xcb, 0x18, 0x1a, 0xef, 0x34, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, 0x30, 0xef, 0xdd, + 0x73, 0x09, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb6, 0x09, 0xab, 0x94, 0x85, 0xf6, 0x2f, 0xa7, 0xd4, + 0x9e, 0x60, 0x8e, 0x53, 0x3a, 0x09, 0x67, 0x78, 0x58, 0x0b, 0xaa, 0x03, 0x1c, 0x3a, 0xae, 0x1a, + 0xd2, 0x2b, 0x73, 0xf1, 0x9a, 0x73, 0x2b, 0x82, 0x62, 0x8d, 0x02, 0xbd, 0x0a, 0xa5, 0x76, 0xb7, + 0xcf, 0x02, 0x22, 0xf7, 0xa9, 0xf3, 0xe2, 0x06, 0x45, 0xe7, 0xdb, 0x88, 0x51, 0x58, 0xa7, 0x43, + 0x47, 0x30, 0xeb, 0x68, 0xaf, 0x81, 0xca, 0x82, 0xc8, 0xc5, 0xb5, 0x89, 0x9f, 0x00, 0xac, 0xb9, + 0xc0, 0x2b, 0x91, 0x0e, 0xc1, 0x09, 0xc9, 0xa8, 0x0f, 0xe5, 0x9e, 0xde, 0x6a, 0x2a, 0x8b, 0xc2, + 0x8f, 0xd7, 0xd2, 0xa9, 0x1a, 0x6d, 0x86, 0xf1, 0x00, 0x91, 0xc0, 0xe1, 0xa4, 0x96, 0xea, 0x57, + 0xa0, 0xf4, 0x1f, 0xce, 0xc4, 0x7c, 0xa6, 0x1e, 0xce, 0x98, 0x89, 0x66, 0xea, 0x0f, 0x33, 0x30, + 0x97, 0x8c, 0x73, 0xf4, 0xf6, 0x34, 0xc6, 0xae, 0xe5, 0xc3, 0x66, 0x90, 0x1d, 0xdb, 0x0c, 0x54, + 0xcd, 0x9d, 0x7e, 0x9a, 0x9a, 0x9b, 0x6c, 0xe7, 0xb9, 0x54, 0xed, 0xbc, 0x0e, 0xc0, 0xe7, 0x13, + 0xea, 0x75, 0xbb, 0x84, 0x8a, 0x12, 0x5d, 0x50, 0x8b, 0xf7, 0x08, 0x8a, 0x35, 0x0a, 0xb4, 0x05, + 0xe8, 0xa0, 0xeb, 0xb5, 0x8f, 0x85, 0x0b, 0xc2, 0xf2, 0x22, 0x8a, 0x73, 0x41, 0x2e, 0x2f, 0x9b, + 0x23, 0x58, 0x7c, 0x01, 0x87, 0x39, 0x03, 0xb9, 0x3d, 0x3e, 0xe6, 0x99, 0xbf, 0x34, 0x60, 0x56, + 0xfc, 0x9a, 0x64, 0x1d, 0x5b, 0x83, 0xdc, 0xa1, 0x17, 0xae, 0x5c, 0x0a, 0xf2, 0x3f, 0x17, 0x5b, + 0x1c, 0x80, 0x25, 0xfc, 0x29, 0xf6, 0xb5, 0xef, 0x1b, 0x90, 0x5c, 0x84, 0xa2, 0x37, 0x64, 0x68, + 0x8c, 0x68, 0x53, 0x39, 0x61, 0x58, 0x5e, 
0x1f, 0x37, 0xe8, 0x3f, 0x93, 0x6a, 0xeb, 0x75, 0x15, + 0x8a, 0xd8, 0xf3, 0x82, 0x3d, 0x2b, 0x38, 0x62, 0xfc, 0xe0, 0x3e, 0xff, 0xa1, 0x7c, 0x23, 0x0e, + 0x2e, 0x30, 0x58, 0xc2, 0xcd, 0x5f, 0x18, 0xf0, 0xdc, 0xd8, 0x15, 0x39, 0xcf, 0x90, 0x76, 0xf4, + 0xa5, 0x4e, 0x14, 0x65, 0x48, 0x4c, 0x87, 0x35, 0x2a, 0x3e, 0xe9, 0x27, 0xf6, 0xea, 0xc3, 0x93, + 0x7e, 0x42, 0x1b, 0x4e, 0xd2, 0x9a, 0xff, 0xcc, 0x40, 0x5e, 0x3e, 0xfb, 0xff, 0xcb, 0x8f, 0xbb, + 0x17, 0x21, 0xcf, 0x84, 0x1e, 0x65, 0x5e, 0xd4, 0x74, 0xa4, 0x76, 0xac, 0xb0, 0x62, 0xd8, 0x26, + 0x8c, 0x59, 0x9d, 0xf0, 0x32, 0xc6, 0xc3, 0xb6, 0x04, 0xe3, 0x10, 0x8f, 0x5e, 0x83, 0x3c, 0x25, + 0x16, 0x8b, 0xde, 0x1d, 0x4b, 0xa1, 0x48, 0x2c, 0xa0, 0xe7, 0x83, 0xda, 0xac, 0x12, 0x2e, 0xbe, + 0xb1, 0xa2, 0x46, 0x77, 0x60, 0xc6, 0x26, 0x81, 0xe5, 0x74, 0xc3, 0xc1, 0xf6, 0x95, 0x49, 0xd6, + 0x23, 0x2d, 0xc9, 0xda, 0x2c, 0x71, 0x9b, 0xd4, 0x07, 0x0e, 0x05, 0xf2, 0x42, 0xd2, 0xf6, 0x6c, + 0xf9, 0x9f, 0xb5, 0x5c, 0x5c, 0x48, 0x36, 0x3c, 0x9b, 0x60, 0x81, 0x31, 0x1f, 0x18, 0x50, 0x92, + 0x92, 0x36, 0xac, 0x3e, 0x23, 0x68, 0x35, 0x3a, 0x85, 0x0c, 0x77, 0x38, 0xda, 0x4c, 0xf3, 0xc7, + 0xc0, 0xf9, 0xa0, 0x56, 0x14, 0x64, 0xe2, 0x65, 0x10, 0x1e, 0x40, 0xf3, 0x51, 0xe6, 0x12, 0x1f, + 0xbd, 0x00, 0x39, 0x71, 0x7b, 0x94, 0x33, 0xa3, 0x79, 0x59, 0x5c, 0x30, 0x2c, 0x71, 0xe6, 0x27, + 0x19, 0x28, 0x27, 0x0e, 0x97, 0x62, 0x38, 0x8e, 0x56, 0x71, 0x99, 0x14, 0xeb, 0xdd, 0xf1, 0xff, + 0x0f, 0xfd, 0x36, 0xe4, 0xdb, 0xfc, 0x7c, 0xe1, 0x3f, 0xa4, 0x57, 0x27, 0x09, 0x85, 0xf0, 0x4c, + 0x9c, 0x49, 0xe2, 0x93, 0x61, 0x25, 0x10, 0xdd, 0x80, 0x45, 0x4a, 0x02, 0x7a, 0xba, 0x7e, 0x18, + 0x10, 0xaa, 0xbf, 0x2f, 0x73, 0xf1, 0xf8, 0x88, 0x87, 0x09, 0xf0, 0x28, 0x4f, 0x58, 0xfa, 0xf3, + 0x4f, 0x51, 0xfa, 0xcd, 0x2e, 0x4c, 0xff, 0x0f, 0x9f, 0x3a, 0xdf, 0x81, 0x62, 0x3c, 0x8c, 0x7e, + 0xca, 0x2a, 0xcd, 0xef, 0x41, 0x81, 0x67, 0x63, 0xf8, 0x88, 0xba, 0xa4, 0xb3, 0x26, 0x7b, 0x5e, + 0x26, 0x4d, 0xcf, 0x33, 0x7b, 0x50, 0xbe, 0xe5, 0xdb, 0x4f, 0xf9, 0x1f, 0xc0, 0x4c, 0xea, 0x8e, + 0xb2, 0x06, 0xf2, 0xbf, 0xea, 0xbc, 0x78, 0xcb, 0x05, 0x94, 0x56, 0xbc, 0xf5, 0x6d, 0x92, 0xb6, + 0x01, 0xfe, 0x89, 0x01, 0x20, 0xb6, 0x21, 0x9b, 0x27, 0xc4, 0x0d, 0xb8, 0x1f, 0x78, 0xc0, 0x87, + 0xfd, 0x20, 0x6e, 0xad, 0xc0, 0xa0, 0x5b, 0x90, 0xf7, 0xc4, 0x4c, 0xac, 0x56, 0xb2, 0x13, 0x6e, + 0xb7, 0xa2, 0x24, 0x97, 0x83, 0x35, 0x56, 0xc2, 0x9a, 0x2b, 0x8f, 0x1e, 0x2f, 0x4d, 0x7d, 0xf4, + 0x78, 0x69, 0xea, 0xe3, 0xc7, 0x4b, 0x53, 0xef, 0x9d, 0x2d, 0x19, 0x8f, 0xce, 0x96, 0x8c, 0x8f, + 0xce, 0x96, 0x8c, 0x8f, 0xcf, 0x96, 0x8c, 0x4f, 0xce, 0x96, 0x8c, 0x07, 0x7f, 0x5d, 0x9a, 0xba, + 0x93, 0x39, 0x59, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x66, 0x55, 0x2c, 0x41, 0x24, + 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 989f076a10..36bda1fe59 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -92,6 +92,16 @@ message APIResource { // categories is a list of the grouped resources this resource belongs to (e.g. 'all') repeated string categories = 7; + + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. 
+ // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + optional string storageVersionHash = 10; } // APIResourceList is a list of APIResource, it is used to expose the name of the @@ -134,9 +144,12 @@ message CreateOptions { // +optional repeated string dryRun = 1; - // If IncludeUninitialized is specified, the object may be - // returned without completing initialization. - optional bool includeUninitialized = 2; + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + optional string fieldManager = 3; } // DeleteOptions may be provided when deleting an API object. @@ -188,14 +201,34 @@ message Duration { } // ExportOptions is the query options to the standard REST get call. +// Deprecated. Planned for removal in 1.18. message ExportOptions { // Should this value be exported. Export strips fields that a user can not specify. + // Deprecated. Planned for removal in 1.18. optional bool export = 1; // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + // Deprecated. Planned for removal in 1.18. optional bool exact = 2; } +// Fields stores a set of fields in a data structure like a Trie. +// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff +message Fields { + // Map stores a set of fields in a data structure like a Trie. + // + // Each key is either a '.' representing the field itself, and will always map to an empty set, + // or a string representing a sub-field or item. The string will follow one of these four formats: + // 'f:', where is the name of a field in a struct, or key in a map + // 'v:', where is the exact json formatted value of a list item + // 'i:', where is position of a item in a list + // 'k:', where is a map of a list item's key fields to their unique values + // If a key maps to an empty Fields value, the field that key represents is part of the set. + // + // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal + map map = 1; +} + // GetOptions is the standard query options to the standard REST get call. message GetOptions { // When specified: @@ -203,10 +236,6 @@ message GetOptions { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. optional string resourceVersion = 1; - - // If true, partially initialized resources are included in the response. - // +optional - optional bool includeUninitialized = 2; } // GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying @@ -380,10 +409,6 @@ message ListOptions { // +optional optional string fieldSelector = 2; - // If true, partially initialized resources are included in the response. - // +optional - optional bool includeUninitialized = 6; - // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional @@ -438,6 +463,31 @@ message ListOptions { optional string continue = 8; } +// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource +// that the fieldset applies to. 
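The 'f:', 'v:', 'i:' and 'k:' key formats described above are easiest to see with a concrete value. A minimal sketch using the Fields type this vendor bump adds; the spec/replicas field names are illustrative only, not taken from the patch:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A tiny field set claiming ownership of .spec.replicas; leaves are
	// empty Fields values, matching the trie semantics described above.
	owned := metav1.Fields{Map: map[string]metav1.Fields{
		"f:spec": {Map: map[string]metav1.Fields{
			"f:replicas": {Map: map[string]metav1.Fields{}},
		}},
	}}

	b, err := json.Marshal(owned)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"f:spec":{"f:replicas":{}}}
}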
+message ManagedFieldsEntry { + // Manager is an identifier of the workflow managing these fields. + optional string manager = 1; + + // Operation is the type of operation which lead to this ManagedFieldsEntry being created. + // The only valid values for this field are 'Apply' and 'Update'. + optional string operation = 2; + + // APIVersion defines the version of this resource that this field set + // applies to. The format is "group/version" just like the top-level + // APIVersion field. It is necessary to track the version of a field + // set because it cannot be automatically converted. + optional string apiVersion = 3; + + // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // +optional + optional Time time = 4; + + // Fields identifies a set of fields. + // +optional + optional Fields fields = 5; +} + // MicroTime is version of Time with microsecond level precision. // // +protobuf.options.marshal=false @@ -602,6 +652,8 @@ message ObjectMeta { // When an object is created, the system will populate this list with the current set of initializers. // Only privileged users may set or modify this list. Once it is empty, it may not be modified further // by any user. + // + // DEPRECATED - initializers are an alpha field and will be removed in v1.15. optional Initializers initializers = 16; // Must be empty before the object is deleted from the registry. Each entry @@ -617,6 +669,19 @@ message ObjectMeta { // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional optional string clusterName = 15; + + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. + // + // This field is alpha and can be changed or removed without notice. + // + // +optional + repeated ManagedFieldsEntry managedFields = 17; } // OwnerReference contains enough information to let you identify an owning @@ -656,11 +721,43 @@ message OwnerReference { message Patch { } +// PatchOptions may be provided when patching an API object. +// PatchOptions is meant to be a superset of UpdateOptions. +message PatchOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + optional bool force = 2; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required for apply requests + // (application/apply-patch) but optional for non-apply patch + // types (JsonPatch, MergePatch, StrategicMergePatch). 
+ // +optional + optional string fieldManager = 3; +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. // +optional optional string uid = 1; + + // Specifies the target ResourceVersion + // +optional + optional string resourceVersion = 2; } // RootPaths lists the paths available at root. @@ -841,6 +938,7 @@ message TypeMeta { } // UpdateOptions may be provided when updating an API object. +// All fields in UpdateOptions should also be present in PatchOptions. message UpdateOptions { // When present, indicates that modifications should not be // persisted. An invalid or unrecognized dryRun directive will @@ -849,6 +947,13 @@ message UpdateOptions { // - All: all dry run stages will be processed // +optional repeated string dryRun = 1; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + optional string fieldManager = 2; } // Verbs masks the value so protobuf can generate diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index d845d7b0ff..b4dc78b3ea 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + "encoding/json" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -227,8 +228,40 @@ func NewUIDPreconditions(uid string) *Preconditions { return &Preconditions{UID: &u} } +// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set. +func NewRVDeletionPrecondition(rv string) *DeleteOptions { + p := Preconditions{ResourceVersion: &rv} + return &DeleteOptions{Preconditions: &p} +} + // HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. func HasObjectMetaSystemFieldValues(meta Object) bool { return !meta.GetCreationTimestamp().Time.IsZero() || len(meta.GetUID()) != 0 } + +// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields +// for a pre-existing object. This is opt-in for new objects with Status subresource. 
+func ResetObjectMetaForStatus(meta, existingMeta Object) { + meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp()) + meta.SetGeneration(existingMeta.GetGeneration()) + meta.SetSelfLink(existingMeta.GetSelfLink()) + meta.SetLabels(existingMeta.GetLabels()) + meta.SetAnnotations(existingMeta.GetAnnotations()) + meta.SetFinalizers(existingMeta.GetFinalizers()) + meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) + meta.SetManagedFields(existingMeta.GetManagedFields()) +} + +// MarshalJSON implements json.Marshaler +func (f Fields) MarshalJSON() ([]byte, error) { + return json.Marshal(&f.Map) +} + +// UnmarshalJSON implements json.Unmarshaler +func (f *Fields) UnmarshalJSON(b []byte) error { + return json.Unmarshal(b, &f.Map) +} + +var _ json.Marshaler = Fields{} +var _ json.Unmarshaler = &Fields{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index ee1447541f..05f07adf1b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -63,6 +63,8 @@ type Object interface { SetOwnerReferences([]OwnerReference) GetClusterName() string SetClusterName(clusterName string) + GetManagedFields() []ManagedFieldsEntry + SetManagedFields(managedFields []ManagedFieldsEntry) } // ListMetaAccessor retrieves the list interface from an object @@ -166,5 +168,9 @@ func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return m func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { meta.OwnerReferences = references } -func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } -func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } +func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } +func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } +func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields } +func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) { + meta.ManagedFields = managedFields +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 0827729d08..76d042a966 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -55,6 +55,7 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &DeleteOptions{}, &CreateOptions{}, &UpdateOptions{}, + &PatchOptions{}, ) utilruntime.Must(scheme.AddConversionFuncs( Convert_v1_WatchEvent_To_watch_Event, @@ -90,6 +91,7 @@ func init() { &DeleteOptions{}, &CreateOptions{}, &UpdateOptions{}, + &PatchOptions{}, ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 65f87546d2..fd6395256a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -235,6 +235,8 @@ type ObjectMeta struct { // When an object is created, the system will populate this list with the current set of initializers. // Only privileged users may set or modify this list. Once it is empty, it may not be modified further // by any user. + // + // DEPRECATED - initializers are an alpha field and will be removed in v1.15. 
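A minimal sketch of how the ResetObjectMetaForStatus helper added above behaves, assuming two illustrative ObjectMeta values; the generation numbers and label values are placeholders:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// existing is the metadata already persisted; incoming arrives with a
	// status update that also tries to change system-managed fields.
	existing := &metav1.ObjectMeta{Generation: 3, Labels: map[string]string{"app": "demo"}}
	incoming := &metav1.ObjectMeta{Generation: 7, Labels: map[string]string{"app": "changed"}}

	// ResetObjectMetaForStatus copies generation, labels, annotations,
	// finalizers, owner references and managed fields back from existing,
	// so the status write cannot mutate them.
	metav1.ResetObjectMetaForStatus(incoming, existing)

	fmt.Println(incoming.GetGeneration(), incoming.GetLabels()["app"]) // 3 demo
}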
Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` // Must be empty before the object is deleted from the registry. Each entry @@ -250,6 +252,19 @@ type ObjectMeta struct { // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` + + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. + // + // This field is alpha and can be changed or removed without notice. + // + // +optional + ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } // Initializers tracks the progress of initialization. @@ -327,9 +342,9 @@ type ListOptions struct { // Defaults to everything. // +optional FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"` + + // +k8s:deprecated=includeUninitialized,protobuf=6 + // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional @@ -384,11 +399,14 @@ type ListOptions struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ExportOptions is the query options to the standard REST get call. +// Deprecated. Planned for removal in 1.18. type ExportOptions struct { TypeMeta `json:",inline"` // Should this value be exported. Export strips fields that a user can not specify. + // Deprecated. Planned for removal in 1.18. Export bool `json:"export" protobuf:"varint,1,opt,name=export"` // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + // Deprecated. Planned for removal in 1.18. Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } @@ -402,9 +420,7 @@ type GetOptions struct { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` + // +k8s:deprecated=includeUninitialized,protobuf=2 } // DeletionPropagation decides if a deletion will propagate to the dependents of @@ -482,6 +498,30 @@ type DeleteOptions struct { type CreateOptions struct { TypeMeta `json:",inline"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. 
Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + // +k8s:deprecated=includeUninitialized,protobuf=2 + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PatchOptions may be provided when patching an API object. +// PatchOptions is meant to be a superset of UpdateOptions. +type PatchOptions struct { + TypeMeta `json:",inline"` + // When present, indicates that modifications should not be // persisted. An invalid or unrecognized dryRun directive will // result in an error response and no further processing of the @@ -490,14 +530,27 @@ type CreateOptions struct { // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - // If IncludeUninitialized is specified, the object may be - // returned without completing initialization. - IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"` + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required for apply requests + // (application/apply-patch) but optional for non-apply patch + // types (JsonPatch, MergePatch, StrategicMergePatch). + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UpdateOptions may be provided when updating an API object. +// All fields in UpdateOptions should also be present in PatchOptions. type UpdateOptions struct { TypeMeta `json:",inline"` @@ -508,6 +561,13 @@ type UpdateOptions struct { // - All: all dry run stages will be processed // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -515,6 +575,9 @@ type Preconditions struct { // Specifies the target UID. 
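A minimal sketch of populating the new options types above for a server-side apply request; the "example-controller" manager name is an assumption for illustration only:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	force := true

	// fieldManager is required for apply patches; Force re-acquires fields
	// currently owned by other managers.
	patchOpts := metav1.PatchOptions{
		FieldManager: "example-controller",
		Force:        &force,
	}

	// Ordinary updates can carry the same manager name.
	updateOpts := metav1.UpdateOptions{FieldManager: "example-controller"}

	fmt.Println(patchOpts.FieldManager, *patchOpts.Force, updateOpts.FieldManager)
}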
// +optional UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // Specifies the target ResourceVersion + // +optional + ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -792,6 +855,9 @@ const ( // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" + // FieldManagerConflict is used to report when another client claims to manage this field, + // It should only be returned for a request using server-side apply. + CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -906,6 +972,15 @@ type APIResource struct { ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 'all') Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"` } // Verbs masks the value so protobuf can generate @@ -1007,3 +1082,49 @@ const ( LabelSelectorOpExists LabelSelectorOperator = "Exists" LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) + +// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource +// that the fieldset applies to. +type ManagedFieldsEntry struct { + // Manager is an identifier of the workflow managing these fields. + Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"` + // Operation is the type of operation which lead to this ManagedFieldsEntry being created. + // The only valid values for this field are 'Apply' and 'Update'. + Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"` + // APIVersion defines the version of this resource that this field set + // applies to. The format is "group/version" just like the top-level + // APIVersion field. It is necessary to track the version of a field + // set because it cannot be automatically converted. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` + // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // +optional + Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` + // Fields identifies a set of fields. + // +optional + Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` +} + +// ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. 
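The new ResourceVersion precondition pairs with the NewRVDeletionPrecondition helper added earlier in this patch. A minimal sketch, with "12345" standing in for an observed resourceVersion:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Delete only if the object is still at the observed resourceVersion;
	// if it has moved on, the apiserver rejects the request.
	opts := metav1.NewRVDeletionPrecondition("12345")
	fmt.Println(*opts.Preconditions.ResourceVersion) // 12345
}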
+type ManagedFieldsOperationType string + +const ( + ManagedFieldsOperationApply ManagedFieldsOperationType = "Apply" + ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" +) + +// Fields stores a set of fields in a data structure like a Trie. +// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff +type Fields struct { + // Map stores a set of fields in a data structure like a Trie. + // + // Each key is either a '.' representing the field itself, and will always map to an empty set, + // or a string representing a sub-field or item. The string will follow one of these four formats: + // 'f:', where is the name of a field in a struct, or key in a map + // 'v:', where is the exact json formatted value of a list item + // 'i:', where is position of a item in a list + // 'k:', where is a map of a list item's key fields to their unique values + // If a key maps to an empty Fields value, the field that key represents is part of the set. + // + // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal + Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 679e709e8e..3b1a09e57f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -49,16 +49,17 @@ func (APIGroupList) SwaggerDoc() map[string]string { } var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the plural name of the resource.", - "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", - "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", - "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", - "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", - "shortNames": "shortNames is a list of suggested short names of the resource.", - "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. 
The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", } func (APIResource) SwaggerDoc() map[string]string { @@ -86,9 +87,9 @@ func (APIVersions) SwaggerDoc() map[string]string { } var map_CreateOptions = map[string]string{ - "": "CreateOptions may be provided when creating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "includeUninitialized": "If IncludeUninitialized is specified, the object may be returned without completing initialization.", + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -109,19 +110,26 @@ func (DeleteOptions) SwaggerDoc() map[string]string { } var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", + "": "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.", + "export": "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.", + "exact": "Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.", } func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } +var map_Fields = map[string]string{ + "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff", +} + +func (Fields) SwaggerDoc() map[string]string { + return map_Fields +} + var map_GetOptions = map[string]string{ - "": "GetOptions is the standard query options to the standard REST get call.", - "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "includeUninitialized": "If true, partially initialized resources are included in the response.", + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", } func (GetOptions) SwaggerDoc() map[string]string { @@ -200,21 +208,33 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "includeUninitialized": "If true, partially initialized resources are included in the response.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { return map_ListOptions } +var map_ManagedFieldsEntry = map[string]string{ + "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "manager": "Manager is an identifier of the workflow managing these fields.", + "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", + "fields": "Fields identifies a set of fields.", +} + +func (ManagedFieldsEntry) SwaggerDoc() map[string]string { + return map_ManagedFieldsEntry +} + var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", @@ -230,9 +250,10 @@ var map_ObjectMeta = map[string]string{ "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". 
The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -261,9 +282,21 @@ func (Patch) SwaggerDoc() map[string]string { return map_Patch } +var map_PatchOptions = map[string]string{ + "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", +} + +func (PatchOptions) SwaggerDoc() map[string]string { + return map_PatchOptions +} + var map_Preconditions = map[string]string{ - "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - "uid": "Specifies the target UID.", + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", + "resourceVersion": "Specifies the target ResourceVersion", } func (Preconditions) SwaggerDoc() map[string]string { @@ -339,8 +372,9 @@ func (TypeMeta) SwaggerDoc() map[string]string { } var map_UpdateOptions = map[string]string{ - "": "UpdateOptions may be provided when updating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index fc138e75aa..75ac693fe4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -47,6 +47,9 @@ func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{ var val interface{} = obj for i, field := range fields { + if val == nil { + return nil, false, nil + } if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 781469ec26..1eaa85804f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -143,13 +143,20 @@ func (u *Unstructured) setNestedField(value interface{}, fields ...string) { SetNestedField(u.Object, value, fields...) } -func (u *Unstructured) setNestedSlice(value []string, fields ...string) { +func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } SetNestedStringSlice(u.Object, value, fields...) } +func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedSlice(u.Object, value, fields...) 
+} + func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) @@ -436,7 +443,7 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { RemoveNestedField(u.Object, "metadata", "finalizers") return } - u.setNestedSlice(finalizers, "metadata", "finalizers") + u.setNestedStringSlice(finalizers, "metadata", "finalizers") } func (u *Unstructured) GetClusterName() string { @@ -450,3 +457,42 @@ func (u *Unstructured) SetClusterName(clusterName string) { } u.setNestedField(clusterName, "metadata", "clusterName") } + +func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { + items, found, err := NestedSlice(u.Object, "metadata", "managedFields") + if !found || err != nil { + return nil + } + managedFields := []metav1.ManagedFieldsEntry{} + for _, item := range items { + m, ok := item.(map[string]interface{}) + if !ok { + utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item)) + return nil + } + out := metav1.ManagedFieldsEntry{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err)) + return nil + } + managedFields = append(managedFields, out) + } + return managedFields +} + +func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { + if managedFields == nil { + RemoveNestedField(u.Object, "metadata", "managedFields") + return + } + items := []interface{}{} + for _, managedFieldsEntry := range managedFields { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err)) + return + } + items = append(items, out) + } + u.setNestedSlice(items, "metadata", "managedFields") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index 81f86fb306..2e36ee23bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -17,7 +17,11 @@ limitations under the License. package validation import ( + "fmt" + "unicode" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" @@ -85,21 +89,67 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { *options.PropagationPolicy != metav1.DeletePropagationOrphan { allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) } - allErrs = append(allErrs, validateDryRun(field.NewPath("dryRun"), options.DryRun)...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) 
return allErrs } func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { - return validateDryRun(field.NewPath("dryRun"), options.DryRun) + return append( + ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), + ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., + ) } func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { - return validateDryRun(field.NewPath("dryRun"), options.DryRun) + return append( + ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), + ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., + ) +} + +func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { + allErrs := field.ErrorList{} + if patchType != types.ApplyPatchType { + if options.Force != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) + } + } else { + if options.FieldManager == "" { + // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. + allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) + } + } + allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) + allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) + return allErrs +} + +var FieldManagerMaxLength = 128 + +// ValidateFieldManager valides that the fieldManager is the proper length and +// only has printable characters. +func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // the field can not be set as a `*string`, so a empty string ("") is + // considered as not set and is defaulted by the rest of the process + // (unless apply is used, in which case it is required). + if len(fieldManager) > FieldManagerMaxLength { + allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + } + // Verify that all characters are printable. + for i, r := range fieldManager { + if !unicode.IsPrint(r) { + allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) + } + } + + return allErrs } var allowedDryRunValues = sets.NewString(metav1.DryRunAll) -func validateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { +// ValidateDryRun validates that a dryRun query param only contains allowed values. +func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { allErrs := field.ErrorList{} if !allowedDryRunValues.HasAll(dryRun...) { allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 10845993e2..68498b8d20 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -312,6 +312,29 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Fields) DeepCopyInto(out *Fields) { + *out = *in + if in.Map != nil { + in, out := &in.Map, &out.Map + *out = make(map[string]Fields, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields. +func (in *Fields) DeepCopy() *Fields { + if in == nil { + return nil + } + out := new(Fields) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GetOptions) DeepCopyInto(out *GetOptions) { *out = *in @@ -624,6 +647,31 @@ func (in *ListOptions) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = (*in).DeepCopy() + } + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = new(Fields) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry. +func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry { + if in == nil { + return nil + } + out := new(ManagedFieldsEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. func (in *MicroTime) DeepCopy() *MicroTime { if in == nil { @@ -678,6 +726,13 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.ManagedFields != nil { + in, out := &in.ManagedFields, &out.ManagedFields + *out = make([]ManagedFieldsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -733,6 +788,41 @@ func (in *Patch) DeepCopy() *Patch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchOptions) DeepCopyInto(out *PatchOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Force != nil { + in, out := &in.Force, &out.Force + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions. +func (in *PatchOptions) DeepCopy() *PatchOptions { + if in == nil { + return nil + } + out := new(PatchOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PatchOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = *in @@ -741,6 +831,11 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = new(types.UID) **out = **in } + if in.ResourceVersion != nil { + in, out := &in.ResourceVersion, &out.ResourceVersion + *out = new(string) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index 46b0e133c3..20c9d2ec73 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +groupName=meta.k8s.io -package v1beta1 +package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index dff56e0340..80343081f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -746,7 +746,7 @@ func isZero(v reflect.Value) bool { func structToUnstructured(sv, dv reflect.Value) error { st, dt := sv.Type(), dv.Type() if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField())) dv = dv.Elem() dt = dv.Type() } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index e4515d8ed0..1f7f662e07 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -42,6 +42,7 @@ type TypeMeta struct { const ( ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" ) // RawExtension is used to hold extensions in external versions. diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go index d522d1dbdc..fe8ecaaffa 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -25,4 +25,5 @@ const ( JSONPatchType PatchType = "application/json-patch+json" MergePatchType PatchType = "application/merge-patch+json" StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" + ApplyPatchType PatchType = "application/apply-patch+yaml" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 88e937679d..62a73f34eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -19,6 +19,8 @@ package errors import ( "errors" "fmt" + + "k8s.io/apimachinery/pkg/util/sets" ) // MessageCountMap contains occurrence for each error message. 
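As a side note, here is a minimal sketch of how the PatchOptions type, the ApplyPatchType constant, and the ValidatePatchOptions/ValidateFieldManager helpers vendored above fit together; the package paths follow the vendored layout and the field manager name is an illustrative assumption:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// An apply patch must name a field manager; expect a Required error for fieldManager here.
	errs := metav1validation.ValidatePatchOptions(&metav1.PatchOptions{}, types.ApplyPatchType)
	fmt.Println(errs)

	// Force may only be combined with apply patches; expect a Forbidden error for force here.
	force := true
	errs = metav1validation.ValidatePatchOptions(
		// "example-controller" is an illustrative manager name, not something used by this patch.
		&metav1.PatchOptions{Force: &force, FieldManager: "example-controller"},
		types.MergePatchType,
	)
	fmt.Println(errs)
}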
@@ -67,12 +69,38 @@ func (agg aggregate) Error() string { if len(agg) == 1 { return agg[0].Error() } - result := fmt.Sprintf("[%s", agg[0].Error()) - for i := 1; i < len(agg); i++ { - result += fmt.Sprintf(", %s", agg[i].Error()) + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) { + msg := err.Error() + if seenerrs.Has(msg) { + return + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) visit(f func(err error)) { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + err.visit(f) + case Aggregate: + for _, nestedErr := range err.Errors() { + f(nestedErr) + } + default: + f(err) + } } - result += "]" - return result } // Errors is part of the Aggregate interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS index 8e8d9fce8e..3f72c69ba3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - pwittrock reviewers: diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 155667cdfc..078f00d9b9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -68,14 +68,17 @@ func IsProbableEOF(err error) bool { if uerr, ok := err.(*url.Error); ok { err = uerr.Err } + msg := err.Error() switch { case err == io.EOF: return true - case err.Error() == "http: can't write HTTP request on broken connection": + case msg == "http: can't write HTTP request on broken connection": return true - case strings.Contains(err.Error(), "connection reset by peer"): + case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): return true - case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): + case strings.Contains(msg, "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(msg), "use of closed network connection"): return true } return false diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index dbbe0de4ca..cfee199fa0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - pwittrock - mengqiy diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index e0d1715420..2dd99992dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -87,6 +87,8 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { const labelValueFmt string = "(" + qualifiedNameFmt + ")?" 
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length const LabelValueMaxLength int = 63 var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") @@ -107,6 +109,8 @@ func IsValidLabelValue(value string) []string { const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") @@ -126,6 +130,8 @@ func IsDNS1123Label(value string) []string { const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") @@ -145,6 +151,8 @@ func IsDNS1123Subdomain(value string) []string { const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) const DNS1035LabelMaxLength int = 63 var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") @@ -282,6 +290,7 @@ const percentErrMsg string = "a valid percent string must be a numeric string fo var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") +// IsValidPercent checks that string is in the form of a percentage func IsValidPercent(percent string) []string { if !percentRegexp.MatchString(percent) { return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} @@ -391,13 +400,13 @@ func hasChDirPrefix(value string) []string { return errs } -// IsSocketAddr checks that a string conforms is a valid socket address +// IsValidSocketAddr checks that string represents a valid socket address // as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) func IsValidSocketAddr(value string) []string { var errs []string ip, port, err := net.SplitHostPort(value) if err != nil { - return append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + errs = append(errs, "must be a valid socket address format, (e.g. 
0.0.0.0:10254 or [::]:10254)") return errs } portInt, _ := strconv.Atoi(port) diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 24724e50ac..9bc928a4e8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -154,6 +154,35 @@ func (v *Version) Components() []uint { return v.components } +// WithMajor returns copy of the version object with requested major number +func (v *Version) WithMajor(major uint) *Version { + result := *v + result.components = []uint{major, v.Minor(), v.Patch()} + return &result +} + +// WithMinor returns copy of the version object with requested minor number +func (v *Version) WithMinor(minor uint) *Version { + result := *v + result.components = []uint{v.Major(), minor, v.Patch()} + return &result +} + +// WithPatch returns copy of the version object with requested patch number +func (v *Version) WithPatch(patch uint) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), patch} + return &result +} + +// WithPreRelease returns copy of the version object with requested prerelease +func (v *Version) WithPreRelease(preRelease string) *Version { + result := *v + result.components = []uint{v.Major(), v.Minor(), v.Patch()} + result.preRelease = preRelease + return &result +} + // String converts a Version back to a string; note that for versions parsed with // ParseGeneric, this will not include the trailing uninterpreted portion of the version // number. diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index ca61168cd4..204177563b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -88,6 +88,15 @@ func Until(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, true, stopCh) } +// UntilWithContext loops until context is done, running f every period. +// +// UntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor and with sliding = true (which means the timer +// for period starts after the f completes). +func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, true) +} + // NonSlidingUntil loops until stop channel is closed, running f every // period. // @@ -98,6 +107,16 @@ func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, false, stopCh) } +// NonSlidingUntilWithContext loops until context is done, running f every +// period. +// +// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor, with sliding = false (meaning the timer for period +// starts at the same time as the function starts). +func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, false) +} + // JitterUntil loops until stop channel is closed, running f every period. // // If jitterFactor is positive, the period is jittered before every run of f. @@ -151,6 +170,19 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b } } +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. 
+// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -173,10 +205,49 @@ type ConditionFunc func() (done bool, err error) // Backoff holds parameters applied to a Backoff function. type Backoff struct { - Duration time.Duration // the base duration - Factor float64 // Duration is multiplied by factor each iteration - Jitter float64 // The amount of jitter applied each iteration - Steps int // Exit with error after this many steps + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration. Must be greater + // than or equal to zero. + Factor float64 + // The amount of jitter applied each iteration. Jitter is applied after + // cap. + Jitter float64 + // The number of steps before duration stops changing. If zero, initial + // duration is always used. Used for exponential backoff in combination + // with Factor. + Steps int + // The returned duration will never be greater than cap *before* jitter + // is applied. The actual maximum cap is `cap * (1.0 + jitter)`. + Cap time.Duration +} + +// Step returns the next interval in the exponential backoff. This method +// will mutate the provided backoff. +func (b *Backoff) Step() time.Duration { + if b.Steps < 1 { + if b.Jitter > 0 { + return Jitter(b.Duration, b.Jitter) + } + return b.Duration + } + b.Steps-- + + duration := b.Duration + + // calculate the next step + if b.Factor != 0 { + b.Duration = time.Duration(float64(b.Duration) * b.Factor) + if b.Cap > 0 && b.Duration > b.Cap { + b.Duration = b.Cap + b.Steps = 0 + } + } + + if b.Jitter > 0 { + duration = Jitter(duration, b.Jitter) + } + return duration } // ExponentialBackoff repeats a condition check with exponential backoff. @@ -190,19 +261,14 @@ type Backoff struct { // If the condition never returns true, ErrWaitTimeout is returned. All other // errors terminate immediately. func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - duration := backoff.Duration - for i := 0; i < backoff.Steps; i++ { - if i != 0 { - adjusted := duration - if backoff.Jitter > 0.0 { - adjusted = Jitter(duration, backoff.Jitter) - } - time.Sleep(adjusted) - duration = time.Duration(float64(duration) * backoff.Factor) - } + for backoff.Steps > 0 { if ok, err := condition(); err != nil || ok { return err } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) } return ErrWaitTimeout } @@ -317,29 +383,39 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{} // WaitFor continually checks 'fn' as driven by 'wait'. // // WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. +// placed on the channel and once more when the channel is closed. If the channel is closed +// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. // -// If 'fn' returns an error the loop ends and that error is returned, and if +// If 'fn' returns an error the loop ends and that error is returned. 
If // 'fn' returns true the loop ends and nil is returned. // -// ErrWaitTimeout will be returned if the channel is closed without fn ever +// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever // returning true. +// +// When the done channel is closed, because the golang `select` statement is +// "uniform pseudo-random", the `fn` might still run one or multiple time, +// though eventually `WaitFor` will return. func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - c := wait(done) + stopCh := make(chan struct{}) + defer close(stopCh) + c := wait(stopCh) for { - _, open := <-c - ok, err := fn() - if err != nil { - return err - } - if ok { - return nil - } - if !open { - break + select { + case _, open := <-c: + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + return ErrWaitTimeout + } + case <-done: + return ErrWaitTimeout } } - return ErrWaitTimeout } // poller returns a WaitFunc that will send to the channel every interval until diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 63d735a804..a9a3853ac3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -217,11 +217,9 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { - klog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { - klog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS index 8e8d9fce8e..3f72c69ba3 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - pwittrock reviewers: diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go deleted file mode 100644 index df69d6a193..0000000000 --- a/vendor/k8s.io/client-go/discovery/cached_discovery.go +++ /dev/null @@ -1,295 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "errors" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - "github.com/googleapis/gnostic/OpenAPIv2" - "k8s.io/klog" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" -) - -// CachedDiscoveryClient implements the functions that discovery server-supported API groups, -// versions and resources. 
-type CachedDiscoveryClient struct { - delegate DiscoveryInterface - - // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. - cacheDirectory string - - // ttl is how long the cache should be considered valid - ttl time.Duration - - // mutex protects the variables below - mutex sync.Mutex - - // ourFiles are all filenames of cache files created by this process - ourFiles map[string]struct{} - // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) - invalidated bool - // fresh is true if all used cache files were ours - fresh bool -} - -var _ CachedDiscoveryInterface = &CachedDiscoveryClient{} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. - if err == nil { - cachedResources := &metav1.APIResourceList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - klog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedResources, nil - } - } - - liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - klog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveResources, err - } - if liveResources == nil || len(liveResources.APIResources) == 0 { - klog.V(3).Infof("skipped caching discovery info, no resources found") - return liveResources, err - } - - if err := d.writeCachedFile(filename, liveResources); err != nil { - klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveResources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - return ServerResources(d) -} - -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. -func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { - filename := filepath.Join(d.cacheDirectory, "servergroups.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
- if err == nil { - cachedGroups := &metav1.APIGroupList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - klog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedGroups, nil - } - } - - liveGroups, err := d.delegate.ServerGroups() - if err != nil { - klog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveGroups, err - } - if liveGroups == nil || len(liveGroups.Groups) == 0 { - klog.V(3).Infof("skipped caching discovery info, no groups found") - return liveGroups, err - } - - if err := d.writeCachedFile(filename, liveGroups); err != nil { - klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveGroups, nil -} - -func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { - // after invalidation ignore cache files not created by this process - d.mutex.Lock() - _, ourFile := d.ourFiles[filename] - if d.invalidated && !ourFile { - d.mutex.Unlock() - return nil, errors.New("cache invalidated") - } - d.mutex.Unlock() - - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - fileInfo, err := file.Stat() - if err != nil { - return nil, err - } - - if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { - return nil, errors.New("cache expired") - } - - // the cache is present and its valid. Try to read and use it. - cachedBytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - d.mutex.Lock() - defer d.mutex.Unlock() - d.fresh = d.fresh && ourFile - - return cachedBytes, nil -} - -func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { - return err - } - - bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) - if err != nil { - return err - } - - f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") - if err != nil { - return err - } - defer os.Remove(f.Name()) - _, err = f.Write(bytes) - if err != nil { - return err - } - - err = os.Chmod(f.Name(), 0755) - if err != nil { - return err - } - - name := f.Name() - err = f.Close() - if err != nil { - return err - } - - // atomic rename - d.mutex.Lock() - defer d.mutex.Unlock() - err = os.Rename(name, filename) - if err == nil { - d.ourFiles[filename] = struct{}{} - } - return err -} - -// RESTClient returns a RESTClient that is used to communicate with API server -// by this client implementation. -func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { - return d.delegate.RESTClient() -} - -// ServerPreferredResources returns the supported resources with the version preferred by the -// server. -func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredResources(d) -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources with the -// version preferred by the server. -func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredNamespacedResources(d) -} - -// ServerVersion retrieves and parses the server's version (git version). -func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { - return d.delegate.ServerVersion() -} - -// OpenAPISchema retrieves and parses the swagger API schema the server supports. 
-func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - return d.delegate.OpenAPISchema() -} - -// Fresh is supposed to tell the caller whether or not to retry if the cache -// fails to find something (false = retry, true = no need to retry). -func (d *CachedDiscoveryClient) Fresh() bool { - d.mutex.Lock() - defer d.mutex.Unlock() - - return d.fresh -} - -// Invalidate enforces that no cached data is used in the future that is older than the current time. -func (d *CachedDiscoveryClient) Invalidate() { - d.mutex.Lock() - defer d.mutex.Unlock() - - d.ourFiles = map[string]struct{}{} - d.fresh = true - d.invalidated = true -} - -// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps -// the created client in a CachedDiscoveryClient. The provided configuration is updated with a -// custom transport that understands cache responses. -// We receive two distinct cache directories for now, in order to preserve old behavior -// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, -// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing -// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not -// be updated with a roundtripper that understands cache responses. -// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. -// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir -// so that server resources and http-cache data are stored in the same location, provided via config flags. -func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { - if len(httpCacheDir) > 0 { - // update the given restconfig with a custom roundtripper that - // understands how to handle cache responses. - wt := config.WrapTransport - config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wt != nil { - rt = wt(rt) - } - return newCacheRoundTripper(httpCacheDir, rt) - } - } - - discoveryClient, err := NewDiscoveryClientForConfig(config) - if err != nil { - return nil, err - } - - return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil -} - -// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. -func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { - return &CachedDiscoveryClient{ - delegate: delegate, - cacheDirectory: cacheDirectory, - ttl: ttl, - ourFiles: map[string]struct{}{}, - fresh: true, - } -} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 17b39de053..61b9c4481b 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - "github.com/googleapis/gnostic/OpenAPIv2" + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,6 +60,9 @@ type DiscoveryInterface interface { } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. 
+// Note that If the ServerResourcesForGroupVersion method returns a cache miss +// error, the user needs to explicitly call Invalidate to clear the cache, +// otherwise the same cache miss error will be returned next time. type CachedDiscoveryInterface interface { DiscoveryInterface // Fresh is supposed to tell the caller whether or not to retry if the cache @@ -68,7 +71,8 @@ type CachedDiscoveryInterface interface { // TODO: this needs to be revisited, this interface can't be locked properly // and doesn't make a lot of sense. Fresh() bool - // Invalidate enforces that no cached data is used in the future that is older than the current time. + // Invalidate enforces that no cached data that is older than the current time + // is used. Invalidate() } @@ -84,12 +88,28 @@ type ServerResourcesInterface interface { // ServerResourcesForGroupVersion returns the supported resources for a group and version. ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) // ServerResources returns the supported resources for all groups and versions. + // + // The returned resource list might be non-nil with partial results even in the case of + // non-nil error. + // + // Deprecated: use ServerGroupsAndResources instead. ServerResources() ([]*metav1.APIResourceList, error) + // ServerResources returns the supported groups and resources for all groups and versions. + // + // The returned group and resource lists might be non-nil with partial results even in the + // case of non-nil error. + ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) // ServerPreferredResources returns the supported resources with the version preferred by the // server. + // + // The returned group and resource lists might be non-nil with partial results even in the + // case of non-nil error. ServerPreferredResources() ([]*metav1.APIResourceList, error) // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. + // + // The returned resource list might be non-nil with partial results even in the case of + // non-nil error. ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) } @@ -187,14 +207,18 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r return resources, nil } -// serverResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) { - return ServerResources(d) -} - // ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, d.serverResources) + _, rs, err := d.ServerGroupsAndResources() + return rs, err +} + +// ServerGroupsAndResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return ServerGroupsAndResources(d) + }) } // ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. @@ -220,23 +244,28 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } -// serverPreferredResources returns the supported resources with the version preferred by the server. 
-func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredResources(d) -} - // ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err + _, rs, err := ServerGroupsAndResources(d) + return rs, err +} + +func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + sgs, err := d.ServerGroups() + if sgs == nil { + return nil, nil, err + } + resultGroups := []*metav1.APIGroup{} + for i := range sgs.Groups { + resultGroups = append(resultGroups, &sgs.Groups[i]) } - groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups) + groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) // order results by group/version discovery order result := []*metav1.APIResourceList{} - for _, apiGroup := range apiGroups.Groups { + for _, apiGroup := range sgs.Groups { for _, version := range apiGroup.Versions { gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} if resources, ok := groupVersionResources[gv]; ok { @@ -246,10 +275,10 @@ func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { } if len(failedGroups) == 0 { - return result, nil + return resultGroups, result, nil } - return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} + return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } // ServerPreferredResources uses the provided discovery interface to look up preferred resources @@ -313,7 +342,7 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } -// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel +// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel. func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -337,7 +366,9 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup if err != nil { // TODO: maybe restrict this to NotFound errors failedGroups[groupVersion] = err - } else { + } + if apiResourceList != nil { + // even in case of error, some fallback might have been returned groupVersionResources[groupVersion] = apiResourceList } }() @@ -351,7 +382,11 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup // ServerPreferredResources returns the supported resources with the version preferred by the // server. 
func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, d.serverPreferredResources) + _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + rs, err := ServerPreferredResources(d) + return nil, rs, err + }) + return rs, err } // ServerPreferredNamespacedResources returns the supported namespaced resources with the @@ -377,7 +412,7 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { var info version.Info err = json.Unmarshal(body, &info) if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) + return nil, fmt.Errorf("unable to parse the server version: %v", err) } return &info, nil } @@ -388,7 +423,7 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { if err != nil { if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO(roycaihw): remove this in 1.11 + // TODO: remove this when kubectl/client-go don't work with 1.9 server data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() if err != nil { return nil, err @@ -406,19 +441,20 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { } // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. -func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { +func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var result []*metav1.APIResourceList + var resultGroups []*metav1.APIGroup var err error for i := 0; i < maxRetries; i++ { - result, err = f() + resultGroups, result, err = f() if err == nil { - return result, nil + return resultGroups, result, nil } if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { - return nil, err + return nil, nil, err } } - return result, err + return resultGroups, result, err } func setDiscoveryDefaults(config *restclient.Config) error { diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go deleted file mode 100644 index 4e2bc24e77..0000000000 --- a/vendor/k8s.io/client-go/discovery/round_tripper.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "net/http" - "path/filepath" - - "github.com/gregjones/httpcache" - "github.com/gregjones/httpcache/diskcache" - "github.com/peterbourgon/diskv" - "k8s.io/klog" -) - -type cacheRoundTripper struct { - rt *httpcache.Transport -} - -// newCacheRoundTripper creates a roundtripper that reads the ETag on -// response headers and send the If-None-Match header on subsequent -// corresponding requests. 
-func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { - d := diskv.New(diskv.Options{ - BasePath: cacheDir, - TempDir: filepath.Join(cacheDir, ".diskv-temp"), - }) - t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) - t.Transport = rt - - return &cacheRoundTripper{rt: t} -} - -func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.rt.RoundTrip(req) -} - -func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := rt.rt.Transport.(canceler); ok { - cr.CancelRequest(req) - } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) - } -} - -func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go index c457be1780..70756a4f58 100644 --- a/vendor/k8s.io/client-go/dynamic/interface.go +++ b/vendor/k8s.io/client-go/dynamic/interface.go @@ -37,7 +37,7 @@ type ResourceInterface interface { Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) List(opts metav1.ListOptions) (*unstructured.UnstructuredList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) + Patch(name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) } type NamespaceableResourceInterface interface { diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go index 9e21cda6e3..852f0c5120 100644 --- a/vendor/k8s.io/client-go/dynamic/simple.go +++ b/vendor/k8s.io/client-go/dynamic/simple.go @@ -283,7 +283,7 @@ func (c *dynamicResourceClient) Watch(opts metav1.ListOptions) (watch.Interface, WatchWithSpecificDecoders(wrappedDecoderFn, unstructured.UnstructuredJSONScheme) } -func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { +func (c *dynamicResourceClient) Patch(name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { result := c.client.client. Patch(pt). AbsPath(append(c.makeURLSegments(name), subresources...)...). 
diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 6ad01d6db1..fb889e6df5 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,7 +20,6 @@ package kubernetes import ( discovery "k8s.io/client-go/discovery" - admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" @@ -37,15 +36,20 @@ import ( batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" @@ -58,73 +62,41 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface + AppsV1() appsv1.AppsV1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface - AppsV1() appsv1.AppsV1Interface - // Deprecated: please explicitly pick a version if possible. - Apps() appsv1.AppsV1Interface AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authentication() authenticationv1.AuthenticationV1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface - // Deprecated: please explicitly pick a version if possible. 
- Authorization() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface - // Deprecated: please explicitly pick a version if possible. - Autoscaling() autoscalingv1.AutoscalingV1Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface - // Deprecated: please explicitly pick a version if possible. - Batch() batchv1.BatchV1Interface BatchV1beta1() batchv1beta1.BatchV1beta1Interface BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Certificates() certificatesv1beta1.CertificatesV1beta1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Coordination() coordinationv1beta1.CoordinationV1beta1Interface + CoordinationV1() coordinationv1.CoordinationV1Interface CoreV1() corev1.CoreV1Interface - // Deprecated: please explicitly pick a version if possible. - Core() corev1.CoreV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Extensions() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface - // Deprecated: please explicitly pick a version if possible. - Networking() networkingv1.NetworkingV1Interface + NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface + NodeV1beta1() nodev1beta1.NodeV1beta1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Policy() policyv1beta1.PolicyV1beta1Interface RbacV1() rbacv1.RbacV1Interface - // Deprecated: please explicitly pick a version if possible. - Rbac() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Scheduling() schedulingv1beta1.SchedulingV1beta1Interface + SchedulingV1() schedulingv1.SchedulingV1Interface SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Settings() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface - // Deprecated: please explicitly pick a version if possible. - Storage() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } @@ -132,43 +104,42 @@ type Interface interface { // version included in a Clientset. 
type Clientset struct { *discovery.DiscoveryClient - admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client - admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client - appsV1beta1 *appsv1beta1.AppsV1beta1Client - appsV1beta2 *appsv1beta2.AppsV1beta2Client - appsV1 *appsv1.AppsV1Client - auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client - authenticationV1 *authenticationv1.AuthenticationV1Client - authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client - authorizationV1 *authorizationv1.AuthorizationV1Client - authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client - autoscalingV1 *autoscalingv1.AutoscalingV1Client - autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client - autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client - batchV1 *batchv1.BatchV1Client - batchV1beta1 *batchv1beta1.BatchV1beta1Client - batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client - certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client - coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client - coreV1 *corev1.CoreV1Client - eventsV1beta1 *eventsv1beta1.EventsV1beta1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - networkingV1 *networkingv1.NetworkingV1Client - policyV1beta1 *policyv1beta1.PolicyV1beta1Client - rbacV1 *rbacv1.RbacV1Client - rbacV1beta1 *rbacv1beta1.RbacV1beta1Client - rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client - schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client - storageV1beta1 *storagev1beta1.StorageV1beta1Client - storageV1 *storagev1.StorageV1Client - storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client -} - -// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client -func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return c.admissionregistrationV1alpha1 + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + appsV1 *appsv1.AppsV1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 *authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client + batchV1 *batchv1.BatchV1Client + batchV1beta1 *batchv1beta1.BatchV1beta1Client + batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client + coordinationV1 *coordinationv1.CoordinationV1Client + coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + networkingV1 *networkingv1.NetworkingV1Client + networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client + nodeV1beta1 *nodev1beta1.NodeV1beta1Client + 
policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + schedulingV1 *schedulingv1.SchedulingV1Client + settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client @@ -176,10 +147,9 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return c.admissionregistrationV1beta1 } -// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. -// Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return c.admissionregistrationV1beta1 +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -192,39 +162,16 @@ func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { return c.appsV1beta2 } -// AppsV1 retrieves the AppsV1Client -func (c *Clientset) AppsV1() appsv1.AppsV1Interface { - return c.appsV1 -} - -// Deprecated: Apps retrieves the default version of AppsClient. -// Please explicitly pick a version. -func (c *Clientset) Apps() appsv1.AppsV1Interface { - return c.appsV1 -} - // AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { return c.auditregistrationV1alpha1 } -// Deprecated: Auditregistration retrieves the default version of AuditregistrationClient. -// Please explicitly pick a version. -func (c *Clientset) Auditregistration() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { - return c.auditregistrationV1alpha1 -} - // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return c.authenticationV1 } -// Deprecated: Authentication retrieves the default version of AuthenticationClient. -// Please explicitly pick a version. -func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { - return c.authenticationV1 -} - // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return c.authenticationV1beta1 @@ -235,12 +182,6 @@ func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { return c.authorizationV1 } -// Deprecated: Authorization retrieves the default version of AuthorizationClient. -// Please explicitly pick a version. -func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { - return c.authorizationV1 -} - // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { return c.authorizationV1beta1 @@ -251,12 +192,6 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return c.autoscalingV1 } -// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. -// Please explicitly pick a version. 
-func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -272,12 +207,6 @@ func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return c.batchV1 } -// Deprecated: Batch retrieves the default version of BatchClient. -// Please explicitly pick a version. -func (c *Clientset) Batch() batchv1.BatchV1Interface { - return c.batchV1 -} - // BatchV1beta1 retrieves the BatchV1beta1Client func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { return c.batchV1beta1 @@ -293,21 +222,14 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } -// Deprecated: Certificates retrieves the default version of CertificatesClient. -// Please explicitly pick a version. -func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { - return c.certificatesV1beta1 -} - // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 } -// Deprecated: Coordination retrieves the default version of CoordinationClient. -// Please explicitly pick a version. -func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface { - return c.coordinationV1beta1 +// CoordinationV1 retrieves the CoordinationV1Client +func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { + return c.coordinationV1 } // CoreV1 retrieves the CoreV1Client @@ -315,53 +237,38 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } -// Deprecated: Core retrieves the default version of CoreClient. -// Please explicitly pick a version. -func (c *Clientset) Core() corev1.CoreV1Interface { - return c.coreV1 -} - // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 } -// Deprecated: Events retrieves the default version of EventsClient. -// Please explicitly pick a version. -func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { - return c.eventsV1beta1 -} - // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 } -// Deprecated: Extensions retrieves the default version of ExtensionsClient. -// Please explicitly pick a version. -func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 -} - // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } -// Deprecated: Networking retrieves the default version of NetworkingClient. -// Please explicitly pick a version. 
-func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { - return c.networkingV1 +// NetworkingV1beta1 retrieves the NetworkingV1beta1Client +func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { + return c.networkingV1beta1 } -// PolicyV1beta1 retrieves the PolicyV1beta1Client -func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { - return c.policyV1beta1 +// NodeV1alpha1 retrieves the NodeV1alpha1Client +func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { + return c.nodeV1alpha1 } -// Deprecated: Policy retrieves the default version of PolicyClient. -// Please explicitly pick a version. -func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { +// NodeV1beta1 retrieves the NodeV1beta1Client +func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { + return c.nodeV1beta1 +} + +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return c.policyV1beta1 } @@ -370,12 +277,6 @@ func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { return c.rbacV1 } -// Deprecated: Rbac retrieves the default version of RbacClient. -// Please explicitly pick a version. -func (c *Clientset) Rbac() rbacv1.RbacV1Interface { - return c.rbacV1 -} - // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return c.rbacV1beta1 @@ -396,10 +297,9 @@ func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Inter return c.schedulingV1beta1 } -// Deprecated: Scheduling retrieves the default version of SchedulingClient. -// Please explicitly pick a version. -func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { - return c.schedulingV1beta1 +// SchedulingV1 retrieves the SchedulingV1Client +func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { + return c.schedulingV1 } // SettingsV1alpha1 retrieves the SettingsV1alpha1Client @@ -407,12 +307,6 @@ func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interfac return c.settingsV1alpha1 } -// Deprecated: Settings retrieves the default version of SettingsClient. -// Please explicitly pick a version. -func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -423,12 +317,6 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return c.storageV1 } -// Deprecated: Storage retrieves the default version of StorageClient. -// Please explicitly pick a version. 
-func (c *Clientset) Storage() storagev1.StorageV1Interface { - return c.storageV1 -} - // StorageV1alpha1 retrieves the StorageV1alpha1Client func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 @@ -450,11 +338,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -466,10 +354,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -522,6 +406,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -538,6 +426,18 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -562,6 +462,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -590,11 +494,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset - cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) @@ -608,16 +511,21 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) + cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) + cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) + cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) cs.rbacV1 = rbacv1.NewForConfigOrDie(c) cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) + cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) @@ -630,11 +538,10 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset - cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) - cs.appsV1 = appsv1.New(c) cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) @@ -648,16 +555,21 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) + cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) + cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1alpha1 = nodev1alpha1.New(c) + cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) + cs.schedulingV1 = schedulingv1.New(c) cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index e336eb9179..8346d26a5c 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,7 +19,6 @@ limitations under the License. package scheme import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -36,15 +35,20 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -62,11 +66,10 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - admissionregistrationv1alpha1.AddToScheme, admissionregistrationv1beta1.AddToScheme, + appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, - appsv1.AddToScheme, auditregistrationv1alpha1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, @@ -80,16 +83,21 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv2alpha1.AddToScheme, certificatesv1beta1.AddToScheme, coordinationv1beta1.AddToScheme, + 
coordinationv1.AddToScheme, corev1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, + networkingv1beta1.AddToScheme, + nodev1alpha1.AddToScheme, + nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, + schedulingv1.AddToScheme, settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go deleted file mode 100644 index 7b8acecee9..0000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. -// A group's client should implement this interface. -type InitializerConfigurationsGetter interface { - InitializerConfigurations() InitializerConfigurationInterface -} - -// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. -type InitializerConfigurationInterface interface { - Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) - Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) - InitializerConfigurationExpansion -} - -// initializerConfigurations implements InitializerConfigurationInterface -type initializerConfigurations struct { - client rest.Interface -} - -// newInitializerConfigurations returns a InitializerConfigurations -func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations { - return &initializerConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. 
-func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Get(). - Resource("initializerconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. -func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.InitializerConfigurationList{} - err = c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested initializerConfigurations. -func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Post(). - Resource("initializerconfigurations"). - Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Put(). - Resource("initializerconfigurations"). - Name(initializerConfiguration.Name). - Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. -func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("initializerconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched initializerConfiguration. 
-func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Patch(pt). - Resource("initializerconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go new file mode 100644 index 0000000000..9b566f3106 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/coordination/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1Interface interface { + RESTClient() rest.Interface + LeasesGetter +} + +// CoordinationV1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface { + return newLeases(c, namespace) +} + +// NewForConfig creates a new CoordinationV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoordinationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1Client { + return &CoordinationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoordinationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go similarity index 88% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go index 9b99e71670..3af5d054f1 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go @@ -16,5 +16,5 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -// This package has the automatically generated fake clientset. -package fake +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go new file mode 100644 index 0000000000..ab24f3734e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type LeaseExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go new file mode 100644 index 0000000000..b6cf1b64f6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// LeasesGetter has a method to return a LeaseInterface. +// A group's client should implement this interface. +type LeasesGetter interface { + Leases(namespace string) LeaseInterface +} + +// LeaseInterface has methods to work with Lease resources. 
+type LeaseInterface interface { + Create(*v1.Lease) (*v1.Lease, error) + Update(*v1.Lease) (*v1.Lease, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Lease, error) + List(opts metav1.ListOptions) (*v1.LeaseList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) + LeaseExpansion +} + +// leases implements LeaseInterface +type leases struct { + client rest.Interface + ns string +} + +// newLeases returns a Leases +func newLeases(c *CoordinationV1Client, namespace string) *leases { + return &leases{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. +func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Leases that match those selectors. +func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.LeaseList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested leases. +func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Post(). + Namespace(c.ns). + Resource("leases"). + Body(lease). + Do(). + Into(result) + return +} + +// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Put(). + Namespace(c.ns). + Resource("leases"). + Name(lease.Name). + Body(lease). + Do(). + Into(result) + return +} + +// Delete takes name of the lease and deletes it. Returns an error if one occurs. +func (c *leases) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("leases"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("leases"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched lease. +func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("leases"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go new file mode 100644 index 0000000000..771101956f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..1442649b37 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type IngressExpansion interface{} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go similarity index 50% rename from vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go rename to vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go index 5b479833c6..8d76678f16 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -16,59 +16,59 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1alpha1 +package v1beta1 import ( "time" + v1beta1 "k8s.io/api/networking/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" ) -// MachinesGetter has a method to return a MachineInterface. +// IngressesGetter has a method to return a IngressInterface. // A group's client should implement this interface. -type MachinesGetter interface { - Machines(namespace string) MachineInterface +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface } -// MachineInterface has methods to work with Machine resources. -type MachineInterface interface { - Create(*v1alpha1.Machine) (*v1alpha1.Machine, error) - Update(*v1alpha1.Machine) (*v1alpha1.Machine, error) - UpdateStatus(*v1alpha1.Machine) (*v1alpha1.Machine, error) +// IngressInterface has methods to work with Ingress resources. +type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) Delete(name string, options *v1.DeleteOptions) error DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.Machine, error) - List(opts v1.ListOptions) (*v1alpha1.MachineList, error) + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Machine, err error) - MachineExpansion + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion } -// machines implements MachineInterface -type machines struct { +// ingresses implements IngressInterface +type ingresses struct { client rest.Interface ns string } -// newMachines returns a Machines -func newMachines(c *ClusterV1alpha1Client, namespace string) *machines { - return &machines{ +// newIngresses returns a Ingresses +func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { + return &ingresses{ client: c.RESTClient(), ns: namespace, } } -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. -func (c *machines) Get(name string, options v1.GetOptions) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} err = c.client.Get(). Namespace(c.ns). - Resource("machines"). + Resource("ingresses"). Name(name). VersionedParams(&options, scheme.ParameterCodec). Do(). @@ -76,16 +76,16 @@ func (c *machines) Get(name string, options v1.GetOptions) (result *v1alpha1.Mac return } -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *machines) List(opts v1.ListOptions) (result *v1alpha1.MachineList, err error) { +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
+func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.MachineList{} + result = &v1beta1.IngressList{} err = c.client.Get(). Namespace(c.ns). - Resource("machines"). + Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Do(). @@ -93,8 +93,8 @@ func (c *machines) List(opts v1.ListOptions) (result *v1alpha1.MachineList, err return } -// Watch returns a watch.Interface that watches the requested machines. -func (c *machines) Watch(opts v1.ListOptions) (watch.Interface, error) { +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -102,32 +102,32 @@ func (c *machines) Watch(opts v1.ListOptions) (watch.Interface, error) { opts.Watch = true return c.client.Get(). Namespace(c.ns). - Resource("machines"). + Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Watch() } -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *machines) Create(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} err = c.client.Post(). Namespace(c.ns). - Resource("machines"). - Body(machine). + Resource("ingresses"). + Body(ingress). Do(). Into(result) return } -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *machines) Update(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). - Resource("machines"). - Name(machine.Name). - Body(machine). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). Do(). Into(result) return @@ -136,24 +136,24 @@ func (c *machines) Update(machine *v1alpha1.Machine) (result *v1alpha1.Machine, // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *machines) UpdateStatus(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} err = c.client.Put(). Namespace(c.ns). - Resource("machines"). - Name(machine.Name). + Resource("ingresses"). + Name(ingress.Name). SubResource("status"). - Body(machine). + Body(ingress). Do(). Into(result) return } -// Delete takes name of the machine and deletes it. Returns an error if one occurs. 
-func (c *machines) Delete(name string, options *v1.DeleteOptions) error { +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). - Resource("machines"). + Resource("ingresses"). Name(name). Body(options). Do(). @@ -161,14 +161,14 @@ func (c *machines) Delete(name string, options *v1.DeleteOptions) error { } // DeleteCollection deletes a collection of objects. -func (c *machines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { var timeout time.Duration if listOptions.TimeoutSeconds != nil { timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second } return c.client.Delete(). Namespace(c.ns). - Resource("machines"). + Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). Body(options). @@ -176,12 +176,12 @@ func (c *machines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li Error() } -// Patch applies the patch and returns the patched machine. -func (c *machines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Machine, err error) { - result = &v1alpha1.Machine{} +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} err = c.client.Patch(pt). Namespace(c.ns). - Resource("machines"). + Resource("ingresses"). SubResource(subresources...). Name(name). Body(data). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go new file mode 100644 index 0000000000..541bf6a9a1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1beta1Interface interface { + RESTClient() rest.Interface + IngressesGetter +} + +// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1beta1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +// NewForConfig creates a new NetworkingV1beta1Client for the given config. 
+func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1beta1Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1beta1Client { + return &NetworkingV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go index 1e29b96f4d..fcef31d169 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type InitializerConfigurationExpansion interface{} +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go similarity index 58% rename from vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go index 5e02f72274..863f2d4dc1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -19,28 +19,28 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1alpha1 "k8s.io/api/node/v1alpha1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type AdmissionregistrationV1alpha1Interface interface { +type NodeV1alpha1Interface interface { RESTClient() rest.Interface - InitializerConfigurationsGetter + RuntimeClassesGetter } -// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. -type AdmissionregistrationV1alpha1Client struct { +// NodeV1alpha1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1alpha1Client struct { restClient rest.Interface } -func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { - return newInitializerConfigurations(c) +func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) } -// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { +// NewForConfig creates a new NodeV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -49,12 +49,12 @@ func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) if err != nil { return nil, err } - return &AdmissionregistrationV1alpha1Client{client}, nil + return &NodeV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and +// NewForConfigOrDie creates a new NodeV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -62,9 +62,9 @@ func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { return client } -// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { - return &AdmissionregistrationV1alpha1Client{c} +// New creates a new NodeV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1alpha1Client { + return &NodeV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -82,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { +func (c *NodeV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go new file mode 100644 index 0000000000..044460ec0b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
+func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go new file mode 100644 index 0000000000..771101956f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..669dd0282e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go new file mode 100644 index 0000000000..52683e201c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1beta1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1beta1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1beta1Client struct { + restClient rest.Interface +} + +func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1beta1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1beta1Client { + return &NodeV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *NodeV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go new file mode 100644 index 0000000000..b3f7c497fb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go new file mode 100644 index 0000000000..3af5d054f1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go new file mode 100644 index 0000000000..cc321329b9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go new file mode 100644 index 0000000000..3abbb7b8eb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. 
+type PriorityClassInterface interface { + Create(*v1.PriorityClass) (*v1.PriorityClass, error) + Update(*v1.PriorityClass) (*v1.PriorityClass, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error) + List(opts metav1.ListOptions) (*v1.PriorityClassList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go new file mode 100644 index 0000000000..bd7e1e54f2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/scheduling/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1Client is used to interact with features provided by the scheduling.k8s.io group. +type SchedulingV1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1Client for the given RESTClient. 
+func New(c rest.Interface) *SchedulingV1Client { + return &SchedulingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go new file mode 100644 index 0000000000..86cf9bf180 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSIDriversGetter has a method to return a CSIDriverInterface. +// A group's client should implement this interface. +type CSIDriversGetter interface { + CSIDrivers() CSIDriverInterface +} + +// CSIDriverInterface has methods to work with CSIDriver resources. +type CSIDriverInterface interface { + Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) + Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) + List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) + CSIDriverExpansion +} + +// cSIDrivers implements CSIDriverInterface +type cSIDrivers struct { + client rest.Interface +} + +// newCSIDrivers returns a CSIDrivers +func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { + return &cSIDrivers{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. +func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Get(). + Resource("csidrivers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. 
+func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CSIDriverList{} + err = c.client.Get(). + Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSIDrivers. +func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. +func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Post(). + Resource("csidrivers"). + Body(cSIDriver). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. +func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Put(). + Resource("csidrivers"). + Name(cSIDriver.Name). + Body(cSIDriver). + Do(). + Into(result) + return +} + +// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. +func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("csidrivers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csidrivers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSIDriver. +func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Patch(pt). + Resource("csidrivers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go new file mode 100644 index 0000000000..e5540c1287 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSINodesGetter has a method to return a CSINodeInterface. +// A group's client should implement this interface. +type CSINodesGetter interface { + CSINodes() CSINodeInterface +} + +// CSINodeInterface has methods to work with CSINode resources. +type CSINodeInterface interface { + Create(*v1beta1.CSINode) (*v1beta1.CSINode, error) + Update(*v1beta1.CSINode) (*v1beta1.CSINode, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error) + List(opts v1.ListOptions) (*v1beta1.CSINodeList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) + CSINodeExpansion +} + +// cSINodes implements CSINodeInterface +type cSINodes struct { + client rest.Interface +} + +// newCSINodes returns a CSINodes +func newCSINodes(c *StorageV1beta1Client) *cSINodes { + return &cSINodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. +func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Get(). + Resource("csinodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSINodes that match those selectors. +func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CSINodeList{} + err = c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSINodes. +func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Post(). + Resource("csinodes"). + Body(cSINode). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Put(). + Resource("csinodes"). 
+ Name(cSINode.Name). + Body(cSINode). + Do(). + Into(result) + return +} + +// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. +func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("csinodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csinodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSINode. +func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Patch(pt). + Resource("csinodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 559f88f676..7ba93142bf 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -18,6 +18,10 @@ limitations under the License. package v1beta1 +type CSIDriverExpansion interface{} + +type CSINodeExpansion interface{} + type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index 4bdebb8782..e9916bc0aa 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -27,6 +27,8 @@ import ( type StorageV1beta1Interface interface { RESTClient() rest.Interface + CSIDriversGetter + CSINodesGetter StorageClassesGetter VolumeAttachmentsGetter } @@ -36,6 +38,14 @@ type StorageV1beta1Client struct { restClient rest.Interface } +func (c *StorageV1beta1Client) CSIDrivers() CSIDriverInterface { + return newCSIDrivers(c) +} + +func (c *StorageV1beta1Client) CSINodes() CSINodeInterface { + return newCSINodes(c) +} + func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS index 3b7ea1b131..e0ec62deb2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + # approval on api packages bubbles to api-approvers reviewers: - sig-auth-authenticators-approvers diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index 921f3a2b94..c714e2457a 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -22,7 +22,7 @@ import ( 
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredentials is used by exec-based plugins to communicate credentials to +// ExecCredential is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS deleted file mode 100644 index c607d2aa8c..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: -- sig-auth-authenticators-approvers -reviewers: -- sig-auth-authenticators-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md deleted file mode 100644 index e4ba791ea7..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# Azure Active Directory plugin for client authentication - -This plugin provides an integration with Azure Active Directory device flow. If no tokens are present in the kubectl configuration, it will prompt a device code which can be used to login in a browser. After login it will automatically fetch the tokens and store them in the kubectl configuration. In addition it will refresh and update the tokens in the configuration when expired. - -## Usage - -1. Create an Azure Active Directory *Web App / API* application for `apiserver` following these [instructions](https://docs.microsoft.com/en-us/azure/active-directory/active-directory-app-registration). The callback URL does not matter (just cannot be empty). - -2. Create a second Azure Active Directory native application for `kubectl`. The callback URL does not matter (just cannot be empty). - -3. On `kubectl` application's configuration page in Azure portal grant permissions to `apiserver` application by clicking on *Required Permissions*, click the *Add* button and search for the apiserver application created in step 1. Select "Access apiserver" under the *DELEGATED PERMISSIONS*. Once added click the *Grant Permissions* button to apply the changes. - -4. Configure the `apiserver` to use the Azure Active Directory as an OIDC provider with following options - - ``` - --oidc-client-id="spn:APISERVER_APPLICATION_ID" \ - --oidc-issuer-url="https://sts.windows.net/TENANT_ID/" - --oidc-username-claim="sub" - ``` - - * Replace the `APISERVER_APPLICATION_ID` with the application ID of `apiserver` application - * Replace `TENANT_ID` with your tenant ID. -  * For a list of alternative username claims that are supported by the OIDC issuer check the JSON response at `https://sts.windows.net/TENANT_ID/.well-known/openid-configuration`. - -5. 
Configure `kubectl` to use the `azure` authentication provider - - ``` - kubectl config set-credentials "USER_NAME" --auth-provider=azure \ - --auth-provider-arg=environment=AzurePublicCloud \ - --auth-provider-arg=client-id=APPLICATION_ID \ - --auth-provider-arg=tenant-id=TENANT_ID \ - --auth-provider-arg=apiserver-id=APISERVER_APPLICATION_ID - ``` - - * Supported environments: `AzurePublicCloud`, `AzureUSGovernmentCloud`, `AzureChinaCloud`, `AzureGermanCloud` - * Replace `USER_NAME` and `TENANT_ID` with your user name and tenant ID - * Replace `APPLICATION_ID` with the application ID of your`kubectl` application ID - * Replace `APISERVER_APPLICATION_ID` with the application ID of your `apiserver` application ID - * Be sure to also (create and) select a context that uses above user - - 6. The access token is acquired when first `kubectl` command is executed - - ``` - kubectl get pods - - To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code DEC7D48GA to authenticate. - ``` - - * After signing in a web browser, the token is stored in the configuration, and it will be reused when executing further commands. - * The resulting username in Kubernetes depends on your [configuration of the `--oidc-username-claim` and `--oidc-username-prefix` flags on the API server](https://kubernetes.io/docs/admin/authentication/#configuring-the-api-server). If you are using any authorization method you need to give permissions to that user, e.g. by binding the user to a role in the case of RBAC. diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go deleted file mode 100644 index d42449fc25..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package azure - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "sync" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" -) - -const ( - azureTokenKey = "azureTokenKey" - tokenType = "Bearer" - authHeader = "Authorization" - - cfgClientID = "client-id" - cfgTenantID = "tenant-id" - cfgAccessToken = "access-token" - cfgRefreshToken = "refresh-token" - cfgExpiresIn = "expires-in" - cfgExpiresOn = "expires-on" - cfgEnvironment = "environment" - cfgApiserverID = "apiserver-id" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil { - klog.Fatalf("Failed to register azure auth plugin: %v", err) - } -} - -var cache = newAzureTokenCache() - -type azureTokenCache struct { - lock sync.Mutex - cache map[string]*azureToken -} - -func newAzureTokenCache() *azureTokenCache { - return &azureTokenCache{cache: make(map[string]*azureToken)} -} - -func (c *azureTokenCache) getToken(tokenKey string) *azureToken { - c.lock.Lock() - defer c.lock.Unlock() - return c.cache[tokenKey] -} - -func (c *azureTokenCache) setToken(tokenKey string, token *azureToken) { - c.lock.Lock() - defer c.lock.Unlock() - c.cache[tokenKey] = token -} - -func newAzureAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - var ts tokenSource - - environment, err := azure.EnvironmentFromName(cfg[cfgEnvironment]) - if err != nil { - environment = azure.PublicCloud - } - ts, err = newAzureTokenSourceDeviceCode(environment, cfg[cfgClientID], cfg[cfgTenantID], cfg[cfgApiserverID]) - if err != nil { - return nil, fmt.Errorf("creating a new azure token source for device code authentication: %v", err) - } - cacheSource := newAzureTokenSource(ts, cache, cfg, persister) - - return &azureAuthProvider{ - tokenSource: cacheSource, - }, nil -} - -type azureAuthProvider struct { - tokenSource tokenSource -} - -func (p *azureAuthProvider) Login() error { - return errors.New("not yet implemented") -} - -func (p *azureAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &azureRoundTripper{ - tokenSource: p.tokenSource, - roundTripper: rt, - } -} - -type azureRoundTripper struct { - tokenSource tokenSource - roundTripper http.RoundTripper -} - -var _ net.RoundTripperWrapper = &azureRoundTripper{} - -func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get(authHeader)) != 0 { - return r.roundTripper.RoundTrip(req) - } - - token, err := r.tokenSource.Token() - if err != nil { - klog.Errorf("Failed to acquire a token: %v", err) - return nil, fmt.Errorf("acquiring a token for authorization header: %v", err) - } - - // clone the request in order to avoid modifying the headers of the original request - req2 := new(http.Request) - *req2 = *req - req2.Header = make(http.Header, len(req.Header)) - for k, s := range req.Header { - req2.Header[k] = append([]string(nil), s...) 
- } - - req2.Header.Set(authHeader, fmt.Sprintf("%s %s", tokenType, token.token.AccessToken)) - - return r.roundTripper.RoundTrip(req2) -} - -func (r *azureRoundTripper) WrappedRoundTripper() http.RoundTripper { return r.roundTripper } - -type azureToken struct { - token adal.Token - clientID string - tenantID string - apiserverID string -} - -type tokenSource interface { - Token() (*azureToken, error) -} - -type azureTokenSource struct { - source tokenSource - cache *azureTokenCache - lock sync.Mutex - cfg map[string]string - persister restclient.AuthProviderConfigPersister -} - -func newAzureTokenSource(source tokenSource, cache *azureTokenCache, cfg map[string]string, persister restclient.AuthProviderConfigPersister) tokenSource { - return &azureTokenSource{ - source: source, - cache: cache, - cfg: cfg, - persister: persister, - } -} - -// Token fetches a token from the cache of configuration if present otherwise -// acquires a new token from the configured source. Automatically refreshes -// the token if expired. -func (ts *azureTokenSource) Token() (*azureToken, error) { - ts.lock.Lock() - defer ts.lock.Unlock() - - var err error - token := ts.cache.getToken(azureTokenKey) - if token == nil { - token, err = ts.retrieveTokenFromCfg() - if err != nil { - token, err = ts.source.Token() - if err != nil { - return nil, fmt.Errorf("acquiring a new fresh token: %v", err) - } - } - if !token.token.IsExpired() { - ts.cache.setToken(azureTokenKey, token) - err = ts.storeTokenInCfg(token) - if err != nil { - return nil, fmt.Errorf("storing the token in configuration: %v", err) - } - } - } - if token.token.IsExpired() { - token, err = ts.refreshToken(token) - if err != nil { - return nil, fmt.Errorf("refreshing the expired token: %v", err) - } - ts.cache.setToken(azureTokenKey, token) - err = ts.storeTokenInCfg(token) - if err != nil { - return nil, fmt.Errorf("storing the refreshed token in configuration: %v", err) - } - } - return token, nil -} - -func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) { - accessToken := ts.cfg[cfgAccessToken] - if accessToken == "" { - return nil, fmt.Errorf("no access token in cfg: %s", cfgAccessToken) - } - refreshToken := ts.cfg[cfgRefreshToken] - if refreshToken == "" { - return nil, fmt.Errorf("no refresh token in cfg: %s", cfgRefreshToken) - } - clientID := ts.cfg[cfgClientID] - if clientID == "" { - return nil, fmt.Errorf("no client ID in cfg: %s", cfgClientID) - } - tenantID := ts.cfg[cfgTenantID] - if tenantID == "" { - return nil, fmt.Errorf("no tenant ID in cfg: %s", cfgTenantID) - } - apiserverID := ts.cfg[cfgApiserverID] - if apiserverID == "" { - return nil, fmt.Errorf("no apiserver ID in cfg: %s", apiserverID) - } - expiresIn := ts.cfg[cfgExpiresIn] - if expiresIn == "" { - return nil, fmt.Errorf("no expiresIn in cfg: %s", cfgExpiresIn) - } - expiresOn := ts.cfg[cfgExpiresOn] - if expiresOn == "" { - return nil, fmt.Errorf("no expiresOn in cfg: %s", cfgExpiresOn) - } - - return &azureToken{ - token: adal.Token{ - AccessToken: accessToken, - RefreshToken: refreshToken, - ExpiresIn: json.Number(expiresIn), - ExpiresOn: json.Number(expiresOn), - NotBefore: json.Number(expiresOn), - Resource: fmt.Sprintf("spn:%s", apiserverID), - Type: tokenType, - }, - clientID: clientID, - tenantID: tenantID, - apiserverID: apiserverID, - }, nil -} - -func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error { - newCfg := make(map[string]string) - newCfg[cfgAccessToken] = token.token.AccessToken - newCfg[cfgRefreshToken] = 
token.token.RefreshToken - newCfg[cfgClientID] = token.clientID - newCfg[cfgTenantID] = token.tenantID - newCfg[cfgApiserverID] = token.apiserverID - newCfg[cfgExpiresIn] = string(token.token.ExpiresIn) - newCfg[cfgExpiresOn] = string(token.token.ExpiresOn) - - err := ts.persister.Persist(newCfg) - if err != nil { - return fmt.Errorf("persisting the configuration: %v", err) - } - ts.cfg = newCfg - return nil -} - -func (ts *azureTokenSource) refreshToken(token *azureToken) (*azureToken, error) { - oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, token.tenantID) - if err != nil { - return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err) - } - - callback := func(t adal.Token) error { - return nil - } - spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - token.clientID, - token.apiserverID, - token.token, - callback) - if err != nil { - return nil, fmt.Errorf("creating new service principal for token refresh: %v", err) - } - - if err := spt.Refresh(); err != nil { - return nil, fmt.Errorf("refreshing token: %v", err) - } - - return &azureToken{ - token: spt.Token(), - clientID: token.clientID, - tenantID: token.tenantID, - apiserverID: token.apiserverID, - }, nil -} - -type azureTokenSourceDeviceCode struct { - environment azure.Environment - clientID string - tenantID string - apiserverID string -} - -func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID string, tenantID string, apiserverID string) (tokenSource, error) { - if clientID == "" { - return nil, errors.New("client-id is empty") - } - if tenantID == "" { - return nil, errors.New("tenant-id is empty") - } - if apiserverID == "" { - return nil, errors.New("apiserver-id is empty") - } - return &azureTokenSourceDeviceCode{ - environment: environment, - clientID: clientID, - tenantID: tenantID, - apiserverID: apiserverID, - }, nil -} - -func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID) - if err != nil { - return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err) - } - client := &autorest.Client{} - deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID) - if err != nil { - return nil, fmt.Errorf("initialing the device code authentication: %v", err) - } - - _, err = fmt.Fprintln(os.Stderr, *deviceCode.Message) - if err != nil { - return nil, fmt.Errorf("prompting the device code message: %v", err) - } - - token, err := adal.WaitForUserCompletion(client, deviceCode) - if err != nil { - return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err) - } - - return &azureToken{ - token: *token, - clientID: ts.clientID, - tenantID: ts.tenantID, - apiserverID: ts.apiserverID, - }, nil -} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 4d72526583..b88902c103 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,8 +31,9 @@ import ( "sync" "time" + "github.com/davecgh/go-spew/spew" "golang.org/x/crypto/ssh/terminal" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -73,8 +74,10 @@ func 
newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } +var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} + func cacheKey(c *api.ExecConfig) string { - return fmt.Sprintf("%#v", c) + return spewConfig.Sprint(c) } type cache struct { @@ -172,13 +175,9 @@ type credentials struct { // UpdateTransportConfig updates the transport.Config to use credentials // returned by the plugin. func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { - wt := c.WrapTransport - c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wt != nil { - rt = wt(rt) - } + c.Wrap(func(rt http.RoundTripper) http.RoundTripper { return &roundTripper{a, rt} - } + }) if c.TLS.GetCert != nil { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS deleted file mode 100644 index 6cf8db8ac9..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -approvers: -- cjcullen -- jlowdermilk -reviewers: -- cjcullen -- jlowdermilk diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go deleted file mode 100644 index e44c2adabb..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcp - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "os/exec" - "strings" - "sync" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/yaml" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/util/jsonpath" - "k8s.io/klog" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { - klog.Fatalf("Failed to register gcp auth plugin: %v", err) - } -} - -var ( - // Stubbable for testing - execCommand = exec.Command - - // defaultScopes: - // - cloud-platform is the base scope to authenticate to GCP. - // - userinfo.email is used to authenticate to GKE APIs with gserviceaccount - // email instead of numeric uniqueID. - defaultScopes = []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email"} -) - -// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide -// tokens for kubectl to authenticate itself to the apiserver. A sample json config -// is provided below with all recognized options described. -// -// { -// 'auth-provider': { -// # Required -// "name": "gcp", -// -// 'config': { -// # Authentication options -// # These options are used while getting a token. -// -// # comma-separated list of GCP API scopes. 
default value of this field -// # is "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/userinfo.email". -// # to override the API scopes, specify this field explicitly. -// "scopes": "https://www.googleapis.com/auth/cloud-platform" -// -// # Caching options -// -// # Raw string data representing cached access token. -// "access-token": "ya29.CjWdA4GiBPTt", -// # RFC3339Nano expiration timestamp for cached access token. -// "expiry": "2016-10-31 22:31:9.123", -// -// # Command execution options -// # These options direct the plugin to execute a specified command and parse -// # token and expiry time from the output of the command. -// -// # Command to execute for access token. Command output will be parsed as JSON. -// # If "cmd-args" is not present, this value will be split on whitespace, with -// # the first element interpreted as the command, remaining elements as args. -// "cmd-path": "/usr/bin/gcloud", -// -// # Arguments to pass to command to execute for access token. -// "cmd-args": "config config-helper --output=json" -// -// # JSONPath to the string field that represents the access token in -// # command output. If omitted, defaults to "{.access_token}". -// "token-key": "{.credential.access_token}", -// -// # JSONPath to the string field that represents expiration timestamp -// # of the access token in the command output. If omitted, defaults to -// # "{.token_expiry}" -// "expiry-key": ""{.credential.token_expiry}", -// -// # golang reference time in the format that the expiration timestamp uses. -// # If omitted, defaults to time.RFC3339Nano -// "time-fmt": "2006-01-02 15:04:05.999999999" -// } -// } -// } -// -type gcpAuthProvider struct { - tokenSource oauth2.TokenSource - persister restclient.AuthProviderConfigPersister -} - -func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - ts, err := tokenSource(isCmdTokenSource(gcpConfig), gcpConfig) - if err != nil { - return nil, err - } - cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig) - if err != nil { - return nil, err - } - return &gcpAuthProvider{cts, persister}, nil -} - -func isCmdTokenSource(gcpConfig map[string]string) bool { - _, ok := gcpConfig["cmd-path"] - return ok -} - -func tokenSource(isCmd bool, gcpConfig map[string]string) (oauth2.TokenSource, error) { - // Command-based token source - if isCmd { - cmd := gcpConfig["cmd-path"] - if len(cmd) == 0 { - return nil, fmt.Errorf("missing access token cmd") - } - if gcpConfig["scopes"] != "" { - return nil, fmt.Errorf("scopes can only be used when kubectl is using a gcp service account key") - } - var args []string - if cmdArgs, ok := gcpConfig["cmd-args"]; ok { - args = strings.Fields(cmdArgs) - } else { - fields := strings.Fields(cmd) - cmd = fields[0] - args = fields[1:] - } - return newCmdTokenSource(cmd, args, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"]), nil - } - - // Google Application Credentials-based token source - scopes := parseScopes(gcpConfig) - ts, err := google.DefaultTokenSource(context.Background(), scopes...) - if err != nil { - return nil, fmt.Errorf("cannot construct google default token source: %v", err) - } - return ts, nil -} - -// parseScopes constructs a list of scopes that should be included in token source -// from the config map. 
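For reference, the command-based options described in the comment above reach newGCPAuthProvider as a plain map[string]string. A minimal sketch of such a map follows; the values are purely illustrative, not a recommended configuration.

package main

import "fmt"

func main() {
	gcpConfig := map[string]string{
		// Presence of cmd-path selects the command-based token source.
		"cmd-path": "/usr/bin/gcloud",
		"cmd-args": "config config-helper --output=json",
		// JSONPath expressions applied to the command's JSON output.
		"token-key":  "{.credential.access_token}",
		"expiry-key": "{.credential.token_expiry}",
		// Reference-time layout used to parse the expiry value.
		"time-fmt": "2006-01-02 15:04:05.999999999",
	}
	for k, v := range gcpConfig {
		fmt.Printf("%s=%s\n", k, v)
	}
}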
-func parseScopes(gcpConfig map[string]string) []string { - scopes, ok := gcpConfig["scopes"] - if !ok { - return defaultScopes - } - if scopes == "" { - return []string{} - } - return strings.Split(gcpConfig["scopes"], ",") -} - -func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - var resetCache map[string]string - if cts, ok := g.tokenSource.(*cachedTokenSource); ok { - resetCache = cts.baseCache() - } else { - resetCache = make(map[string]string) - } - return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister, resetCache} -} - -func (g *gcpAuthProvider) Login() error { return nil } - -type cachedTokenSource struct { - lk sync.Mutex - source oauth2.TokenSource - accessToken string - expiry time.Time - persister restclient.AuthProviderConfigPersister - cache map[string]string -} - -func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) { - var expiryTime time.Time - if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil { - expiryTime = parsedTime - } - if cache == nil { - cache = make(map[string]string) - } - return &cachedTokenSource{ - source: ts, - accessToken: accessToken, - expiry: expiryTime, - persister: persister, - cache: cache, - }, nil -} - -func (t *cachedTokenSource) Token() (*oauth2.Token, error) { - tok := t.cachedToken() - if tok.Valid() && !tok.Expiry.IsZero() { - return tok, nil - } - tok, err := t.source.Token() - if err != nil { - return nil, err - } - cache := t.update(tok) - if t.persister != nil { - if err := t.persister.Persist(cache); err != nil { - klog.V(4).Infof("Failed to persist token: %v", err) - } - } - return tok, nil -} - -func (t *cachedTokenSource) cachedToken() *oauth2.Token { - t.lk.Lock() - defer t.lk.Unlock() - return &oauth2.Token{ - AccessToken: t.accessToken, - TokenType: "Bearer", - Expiry: t.expiry, - } -} - -func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string { - t.lk.Lock() - defer t.lk.Unlock() - t.accessToken = tok.AccessToken - t.expiry = tok.Expiry - ret := map[string]string{} - for k, v := range t.cache { - ret[k] = v - } - ret["access-token"] = t.accessToken - ret["expiry"] = t.expiry.Format(time.RFC3339Nano) - return ret -} - -// baseCache is the base configuration value for this TokenSource, without any cached ephemeral tokens. -func (t *cachedTokenSource) baseCache() map[string]string { - t.lk.Lock() - defer t.lk.Unlock() - ret := map[string]string{} - for k, v := range t.cache { - ret[k] = v - } - delete(ret, "access-token") - delete(ret, "expiry") - return ret -} - -type commandTokenSource struct { - cmd string - args []string - tokenKey string - expiryKey string - timeFmt string -} - -func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt string) *commandTokenSource { - if len(timeFmt) == 0 { - timeFmt = time.RFC3339Nano - } - if len(tokenKey) == 0 { - tokenKey = "{.access_token}" - } - if len(expiryKey) == 0 { - expiryKey = "{.token_expiry}" - } - return &commandTokenSource{ - cmd: cmd, - args: args, - tokenKey: tokenKey, - expiryKey: expiryKey, - timeFmt: timeFmt, - } -} - -func (c *commandTokenSource) Token() (*oauth2.Token, error) { - fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ") - cmd := execCommand(c.cmd, c.args...) 
- var stderr bytes.Buffer - cmd.Stderr = &stderr - output, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s stderr=%s", fullCmd, err, output, string(stderr.Bytes())) - } - token, err := c.parseTokenCmdOutput(output) - if err != nil { - return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err) - } - return token, nil -} - -func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) { - output, err := yaml.ToJSON(output) - if err != nil { - return nil, err - } - var data interface{} - if err := json.Unmarshal(output, &data); err != nil { - return nil, err - } - - accessToken, err := parseJSONPath(data, "token-key", c.tokenKey) - if err != nil { - return nil, fmt.Errorf("error parsing token-key %q from %q: %v", c.tokenKey, string(output), err) - } - expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey) - if err != nil { - return nil, fmt.Errorf("error parsing expiry-key %q from %q: %v", c.expiryKey, string(output), err) - } - var expiry time.Time - if t, err := time.Parse(c.timeFmt, expiryStr); err != nil { - klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) - } else { - expiry = t - } - - return &oauth2.Token{ - AccessToken: accessToken, - TokenType: "Bearer", - Expiry: expiry, - }, nil -} - -func parseJSONPath(input interface{}, name, template string) (string, error) { - j := jsonpath.New(name) - buf := new(bytes.Buffer) - if err := j.Parse(template); err != nil { - return "", err - } - if err := j.Execute(buf, input); err != nil { - return "", err - } - return buf.String(), nil -} - -type conditionalTransport struct { - oauthTransport *oauth2.Transport - persister restclient.AuthProviderConfigPersister - resetCache map[string]string -} - -var _ net.RoundTripperWrapper = &conditionalTransport{} - -func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) != 0 { - return t.oauthTransport.Base.RoundTrip(req) - } - - res, err := t.oauthTransport.RoundTrip(req) - - if err != nil { - return nil, err - } - - if res.StatusCode == 401 { - klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") - t.persister.Persist(t.resetCache) - } - - return res, nil -} - -func (t *conditionalTransport) WrappedRoundTripper() http.RoundTripper { return t.oauthTransport.Base } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS deleted file mode 100644 index e0f3c6ca98..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -approvers: -- ericchiang -reviewers: -- ericchiang -- rithujohn191 diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go deleted file mode 100644 index 1383a97c62..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oidc - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" - "sync" - "time" - - "golang.org/x/oauth2" - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" - "k8s.io/klog" -) - -const ( - cfgIssuerUrl = "idp-issuer-url" - cfgClientID = "client-id" - cfgClientSecret = "client-secret" - cfgCertificateAuthority = "idp-certificate-authority" - cfgCertificateAuthorityData = "idp-certificate-authority-data" - cfgIDToken = "id-token" - cfgRefreshToken = "refresh-token" - - // Unused. Scopes aren't sent during refreshing. - cfgExtraScopes = "extra-scopes" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil { - klog.Fatalf("Failed to register oidc auth plugin: %v", err) - } -} - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -// -// NOTE(ericchiang): this is take from golang.org/x/oauth2 -const expiryDelta = 10 * time.Second - -var cache = newClientCache() - -// Like TLS transports, keep a cache of OIDC clients indexed by issuer URL. This ensures -// current requests from different clients don't concurrently attempt to refresh the same -// set of credentials. -type clientCache struct { - mu sync.RWMutex - - cache map[cacheKey]*oidcAuthProvider -} - -func newClientCache() *clientCache { - return &clientCache{cache: make(map[cacheKey]*oidcAuthProvider)} -} - -type cacheKey struct { - // Canonical issuer URL string of the provider. - issuerURL string - clientID string -} - -func (c *clientCache) getClient(issuer, clientID string) (*oidcAuthProvider, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - client, ok := c.cache[cacheKey{issuer, clientID}] - return client, ok -} - -// setClient attempts to put the client in the cache but may return any clients -// with the same keys set before. This is so there's only ever one client for a provider. -func (c *clientCache) setClient(issuer, clientID string, client *oidcAuthProvider) *oidcAuthProvider { - c.mu.Lock() - defer c.mu.Unlock() - key := cacheKey{issuer, clientID} - - // If another client has already initialized a client for the given provider we want - // to use that client instead of the one we're trying to set. This is so all transports - // share a client and can coordinate around the same mutex when refreshing and writing - // to the kubeconfig. - if oldClient, ok := c.cache[key]; ok { - return oldClient - } - - c.cache[key] = client - return client -} - -func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - issuer := cfg[cfgIssuerUrl] - if issuer == "" { - return nil, fmt.Errorf("Must provide %s", cfgIssuerUrl) - } - - clientID := cfg[cfgClientID] - if clientID == "" { - return nil, fmt.Errorf("Must provide %s", cfgClientID) - } - - // Check cache for existing provider. 
- if provider, ok := cache.getClient(issuer, clientID); ok { - return provider, nil - } - - if len(cfg[cfgExtraScopes]) > 0 { - klog.V(2).Infof("%s auth provider field depricated, refresh request don't send scopes", - cfgExtraScopes) - } - - var certAuthData []byte - var err error - if cfg[cfgCertificateAuthorityData] != "" { - certAuthData, err = base64.StdEncoding.DecodeString(cfg[cfgCertificateAuthorityData]) - if err != nil { - return nil, err - } - } - - clientConfig := restclient.Config{ - TLSClientConfig: restclient.TLSClientConfig{ - CAFile: cfg[cfgCertificateAuthority], - CAData: certAuthData, - }, - } - - trans, err := restclient.TransportFor(&clientConfig) - if err != nil { - return nil, err - } - hc := &http.Client{Transport: trans} - - provider := &oidcAuthProvider{ - client: hc, - now: time.Now, - cfg: cfg, - persister: persister, - } - - return cache.setClient(issuer, clientID, provider), nil -} - -type oidcAuthProvider struct { - client *http.Client - - // Method for determining the current time. - now func() time.Time - - // Mutex guards persisting to the kubeconfig file and allows synchronized - // updates to the in-memory config. It also ensures concurrent calls to - // the RoundTripper only trigger a single refresh request. - mu sync.Mutex - cfg map[string]string - persister restclient.AuthProviderConfigPersister -} - -func (p *oidcAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &roundTripper{ - wrapped: rt, - provider: p, - } -} - -func (p *oidcAuthProvider) Login() error { - return errors.New("not yet implemented") -} - -type roundTripper struct { - provider *oidcAuthProvider - wrapped http.RoundTripper -} - -var _ net.RoundTripperWrapper = &roundTripper{} - -func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) != 0 { - return r.wrapped.RoundTrip(req) - } - token, err := r.provider.idToken() - if err != nil { - return nil, err - } - - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *req - // deep copy of the Header so we don't modify the original - // request's Header (as per RoundTripper contract). - r2.Header = make(http.Header) - for k, s := range req.Header { - r2.Header[k] = s - } - r2.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return r.wrapped.RoundTrip(r2) -} - -func (t *roundTripper) WrappedRoundTripper() http.RoundTripper { return t.wrapped } - -func (p *oidcAuthProvider) idToken() (string, error) { - p.mu.Lock() - defer p.mu.Unlock() - - if idToken, ok := p.cfg[cfgIDToken]; ok && len(idToken) > 0 { - valid, err := idTokenExpired(p.now, idToken) - if err != nil { - return "", err - } - if valid { - // If the cached id token is still valid use it. - return idToken, nil - } - } - - // Try to request a new token using the refresh token. - rt, ok := p.cfg[cfgRefreshToken] - if !ok || len(rt) == 0 { - return "", errors.New("No valid id-token, and cannot refresh without refresh-token") - } - - // Determine provider's OAuth2 token endpoint. 
- tokenURL, err := tokenEndpoint(p.client, p.cfg[cfgIssuerUrl]) - if err != nil { - return "", err - } - - config := oauth2.Config{ - ClientID: p.cfg[cfgClientID], - ClientSecret: p.cfg[cfgClientSecret], - Endpoint: oauth2.Endpoint{TokenURL: tokenURL}, - } - - ctx := context.WithValue(context.Background(), oauth2.HTTPClient, p.client) - token, err := config.TokenSource(ctx, &oauth2.Token{RefreshToken: rt}).Token() - if err != nil { - return "", fmt.Errorf("failed to refresh token: %v", err) - } - - idToken, ok := token.Extra("id_token").(string) - if !ok { - // id_token isn't a required part of a refresh token response, so some - // providers (Okta) don't return this value. - // - // See https://github.com/kubernetes/kubernetes/issues/36847 - return "", fmt.Errorf("token response did not contain an id_token, either the scope \"openid\" wasn't requested upon login, or the provider doesn't support id_tokens as part of the refresh response.") - } - - // Create a new config to persist. - newCfg := make(map[string]string) - for key, val := range p.cfg { - newCfg[key] = val - } - - // Update the refresh token if the server returned another one. - if token.RefreshToken != "" && token.RefreshToken != rt { - newCfg[cfgRefreshToken] = token.RefreshToken - } - newCfg[cfgIDToken] = idToken - - // Persist new config and if successful, update the in memory config. - if err = p.persister.Persist(newCfg); err != nil { - return "", fmt.Errorf("could not persist new tokens: %v", err) - } - p.cfg = newCfg - - return idToken, nil -} - -// tokenEndpoint uses OpenID Connect discovery to determine the OAuth2 token -// endpoint for the provider, the endpoint the client will use the refresh -// token against. -func tokenEndpoint(client *http.Client, issuer string) (string, error) { - // Well known URL for getting OpenID Connect metadata. - // - // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfig - wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration" - resp, err := client.Get(wellKnown) - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - // Don't produce an error that's too huge (e.g. if we get HTML back for some reason). - const n = 80 - if len(body) > n { - body = append(body[:n], []byte("...")...) - } - return "", fmt.Errorf("oidc: failed to query metadata endpoint %s: %q", resp.Status, body) - } - - // Metadata object. We only care about the token_endpoint, the thing endpoint - // we'll be refreshing against. 
- // - // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata - var metadata struct { - TokenURL string `json:"token_endpoint"` - } - if err := json.Unmarshal(body, &metadata); err != nil { - return "", fmt.Errorf("oidc: failed to decode provider discovery object: %v", err) - } - if metadata.TokenURL == "" { - return "", fmt.Errorf("oidc: discovery object doesn't contain a token_endpoint") - } - return metadata.TokenURL, nil -} - -func idTokenExpired(now func() time.Time, idToken string) (bool, error) { - parts := strings.Split(idToken, ".") - if len(parts) != 3 { - return false, fmt.Errorf("ID Token is not a valid JWT") - } - - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return false, err - } - var claims struct { - Expiry jsonTime `json:"exp"` - } - if err := json.Unmarshal(payload, &claims); err != nil { - return false, fmt.Errorf("parsing claims: %v", err) - } - - return now().Add(expiryDelta).Before(time.Time(claims.Expiry)), nil -} - -// jsonTime is a json.Unmarshaler that parses a unix timestamp. -// Because JSON numbers don't differentiate between ints and floats, -// we want to ensure we can parse either. -type jsonTime time.Time - -func (j *jsonTime) UnmarshalJSON(b []byte) error { - var n json.Number - if err := json.Unmarshal(b, &n); err != nil { - return err - } - var unix int64 - - if t, err := n.Int64(); err == nil { - unix = t - } else { - f, err := n.Float64() - if err != nil { - return err - } - unix = int64(f) - } - *j = jsonTime(time.Unix(unix, 0)) - return nil -} - -func (j jsonTime) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(j).Unix()) -} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go deleted file mode 100644 index fab5104ef6..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openstack - -import ( - "fmt" - "net/http" - "sync" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("openstack", newOpenstackAuthProvider); err != nil { - klog.Fatalf("Failed to register openstack auth plugin: %s", err) - } -} - -// DefaultTTLDuration is the time before a token gets expired. -const DefaultTTLDuration = 10 * time.Minute - -// openstackAuthProvider is an authprovider for openstack. this provider reads -// the environment variables to determine the client identity, and generates a -// token which will be inserted into the request header later. -type openstackAuthProvider struct { - ttl time.Duration - tokenGetter TokenGetter -} - -// TokenGetter returns a bearer token that can be inserted into request. 
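A minimal, self-contained sketch of the TokenGetter contract described in the comment above (the interface itself is declared just below); envTokenGetter and the OS_TOKEN variable are hypothetical stand-ins for illustration, not part of this plugin.

package main

import (
	"fmt"
	"os"
)

// TokenGetter mirrors the contract: anything that can produce a bearer
// token string for the Authorization header.
type TokenGetter interface {
	Token() (string, error)
}

// envTokenGetter returns a pre-issued token from an environment variable
// instead of authenticating against Keystone.
type envTokenGetter struct{ envVar string }

func (e envTokenGetter) Token() (string, error) {
	tok := os.Getenv(e.envVar)
	if tok == "" {
		return "", fmt.Errorf("environment variable %s is not set", e.envVar)
	}
	return tok, nil
}

func main() {
	var g TokenGetter = envTokenGetter{envVar: "OS_TOKEN"}
	if tok, err := g.Token(); err != nil {
		fmt.Println("no token:", err)
	} else {
		fmt.Println("would send: Authorization: Bearer " + tok)
	}
}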
-type TokenGetter interface { - Token() (string, error) -} - -type tokenGetter struct { - authOpt *gophercloud.AuthOptions -} - -// Token creates a token by authenticate with keystone. -func (t *tokenGetter) Token() (string, error) { - var options gophercloud.AuthOptions - var err error - if t.authOpt == nil { - // reads the config from the environment - klog.V(4).Info("reading openstack config from the environment variables") - options, err = openstack.AuthOptionsFromEnv() - if err != nil { - return "", fmt.Errorf("failed to read openstack env vars: %s", err) - } - } else { - options = *t.authOpt - } - client, err := openstack.AuthenticatedClient(options) - if err != nil { - return "", fmt.Errorf("authentication failed: %s", err) - } - return client.TokenID, nil -} - -// cachedGetter caches a token until it gets expired, after the expiration, it will -// generate another token and cache it. -type cachedGetter struct { - mutex sync.Mutex - tokenGetter TokenGetter - - token string - born time.Time - ttl time.Duration -} - -// Token returns the current available token, create a new one if expired. -func (c *cachedGetter) Token() (string, error) { - c.mutex.Lock() - defer c.mutex.Unlock() - - var err error - // no token or exceeds the TTL - if c.token == "" || time.Since(c.born) > c.ttl { - c.token, err = c.tokenGetter.Token() - if err != nil { - return "", fmt.Errorf("failed to get token: %s", err) - } - c.born = time.Now() - } - return c.token, nil -} - -// tokenRoundTripper implements the RoundTripper interface: adding the bearer token -// into the request header. -type tokenRoundTripper struct { - http.RoundTripper - - tokenGetter TokenGetter -} - -var _ net.RoundTripperWrapper = &tokenRoundTripper{} - -// RoundTrip adds the bearer token into the request. -func (t *tokenRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - // if the authorization header already present, use it. - if req.Header.Get("Authorization") != "" { - return t.RoundTripper.RoundTrip(req) - } - - token, err := t.tokenGetter.Token() - if err == nil { - req.Header.Set("Authorization", "Bearer "+token) - } else { - klog.V(4).Infof("failed to get token: %s", err) - } - - return t.RoundTripper.RoundTrip(req) -} - -func (t *tokenRoundTripper) WrappedRoundTripper() http.RoundTripper { return t.RoundTripper } - -// newOpenstackAuthProvider creates an auth provider which works with openstack -// environment. -func newOpenstackAuthProvider(_ string, config map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - var ttlDuration time.Duration - var err error - - klog.Warningf("WARNING: in-tree openstack auth plugin is now deprecated. 
please use the \"client-keystone-auth\" kubectl/client-go credential plugin instead") - ttl, found := config["ttl"] - if !found { - ttlDuration = DefaultTTLDuration - // persist to config - config["ttl"] = ttlDuration.String() - if err = persister.Persist(config); err != nil { - return nil, fmt.Errorf("failed to persist config: %s", err) - } - } else { - ttlDuration, err = time.ParseDuration(ttl) - if err != nil { - return nil, fmt.Errorf("failed to parse ttl config: %s", err) - } - } - - authOpt := gophercloud.AuthOptions{ - IdentityEndpoint: config["identityEndpoint"], - Username: config["username"], - Password: config["password"], - DomainName: config["name"], - TenantID: config["tenantId"], - TenantName: config["tenantName"], - } - - getter := tokenGetter{} - // not empty - if (authOpt != gophercloud.AuthOptions{}) { - if len(authOpt.IdentityEndpoint) == 0 { - return nil, fmt.Errorf("empty %q in the config for openstack auth provider", "identityEndpoint") - } - getter.authOpt = &authOpt - } - - return &openstackAuthProvider{ - ttl: ttlDuration, - tokenGetter: &getter, - }, nil -} - -func (oap *openstackAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &tokenRoundTripper{ - RoundTripper: rt, - tokenGetter: &cachedGetter{ - tokenGetter: oap.tokenGetter, - ttl: oap.ttl, - }, - } -} - -func (oap *openstackAuthProvider) Login() error { return nil } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go deleted file mode 100644 index 42085d7ae1..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package auth - -import ( - // Initialize all known client auth plugins. - _ "k8s.io/client-go/plugin/pkg/client/auth/azure" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" - _ "k8s.io/client-go/plugin/pkg/client/auth/openstack" -) diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS index 8d97da007d..49dabc61b0 100644 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - thockin - smarterclayton diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 438eb3beda..3f6b9bc236 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog" @@ -70,6 +71,11 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. 
+ // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the configuration that RESTClient will use for impersonation. Impersonate ImpersonationConfig @@ -90,13 +96,16 @@ type Config struct { // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. Use WrapTransport - // for most client level operations. + // to provide additional per-server middleware behavior. Transport http.RoundTripper // WrapTransport will be invoked for custom HTTP behavior after the underlying // transport is initialized (either the transport created from TLSClientConfig, // Transport, or http.DefaultTransport). The config may layer other RoundTrippers // on top of the returned RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper + // + // A future release will change this field to an array. Use config.Wrap() + // instead of setting this value directly. + WrapTransport transport.WrapperFunc // QPS indicates the maximum QPS to the master from this client. // If it's zero, the created RESTClient will use DefaultQPS: 5 @@ -120,6 +129,47 @@ type Config struct { // Version string } +var _ fmt.Stringer = new(Config) +var _ fmt.GoStringer = new(Config) + +type sanitizedConfig *Config + +type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister } + +func (sanitizedAuthConfigPersister) GoString() string { + return "rest.AuthProviderConfigPersister(--- REDACTED ---)" +} +func (sanitizedAuthConfigPersister) String() string { + return "rest.AuthProviderConfigPersister(--- REDACTED ---)" +} + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config +// to prevent accidental leaking via logs. +func (c *Config) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of Config to +// prevent accidental leaking via logs. +func (c *Config) String() string { + if c == nil { + return "" + } + cc := sanitizedConfig(CopyConfig(c)) + // Explicitly mark non-empty credential fields as redacted. + if cc.Password != "" { + cc.Password = "--- REDACTED ---" + } + if cc.BearerToken != "" { + cc.BearerToken = "--- REDACTED ---" + } + if cc.AuthConfigPersister != nil { + cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} + } + + return fmt.Sprintf("%#v", cc) +} + // ImpersonationConfig has all the available impersonation options type ImpersonationConfig struct { // UserName is the username to impersonate on each request. @@ -159,6 +209,40 @@ type TLSClientConfig struct { CAData []byte } +var _ fmt.Stringer = TLSClientConfig{} +var _ fmt.GoStringer = TLSClientConfig{} + +type sanitizedTLSClientConfig TLSClientConfig + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// TLSClientConfig to prevent accidental leaking via logs. +func (c TLSClientConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of +// TLSClientConfig to prevent accidental leaking via logs. +func (c TLSClientConfig) String() string { + cc := sanitizedTLSClientConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + CAFile: c.CAFile, + CertData: c.CertData, + KeyData: c.KeyData, + CAData: c.CAData, + } + // Explicitly mark non-empty credential fields as redacted. 
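The same redaction pattern is applied just below to the TLS certificate and key bytes. Reduced to a stand-in struct so it can run on its own (loginConfig and its fields are assumptions for illustration, not the real rest.Config), the idea looks like this; the upstream code additionally converts to a private alias type before formatting, which is unnecessary here because no GoString method is defined.

package main

import "fmt"

type loginConfig struct {
	Host        string
	Password    string
	BearerToken string
}

// String sanitizes sensitive fields before formatting, so logging the
// struct with %v or %s cannot leak credentials.
func (c loginConfig) String() string {
	cc := c // work on a copy; the caller's values are untouched
	if cc.Password != "" {
		cc.Password = "--- REDACTED ---"
	}
	if cc.BearerToken != "" {
		cc.BearerToken = "--- REDACTED ---"
	}
	return fmt.Sprintf("%#v", cc)
}

func main() {
	cfg := loginConfig{Host: "https://example:6443", Password: "hunter2", BearerToken: "abc"}
	fmt.Println(cfg) // prints the redacted form
}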
+ if len(cc.CertData) != 0 { + cc.CertData = []byte("--- TRUNCATED ---") + } + if len(cc.KeyData) != 0 { + cc.KeyData = []byte("--- REDACTED ---") + } + return fmt.Sprintf("%#v", cc) +} + type ContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. // If not set, ContentType will be used to define the Accept header @@ -322,9 +406,8 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - ts := NewCachedFileTokenSource(tokenFile) - - if _, err := ts.Token(); err != nil { + token, err := ioutil.ReadFile(tokenFile) + if err != nil { return nil, err } @@ -340,7 +423,8 @@ func InClusterConfig() (*Config, error) { // TODO: switch to using cluster DNS. Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, - WrapTransport: TokenSourceWrapTransport(ts), + BearerToken: string(token), + BearerTokenFile: tokenFile, }, nil } @@ -430,12 +514,13 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { return &Config{ - Host: config.Host, - APIPath: config.APIPath, - ContentConfig: config.ContentConfig, - Username: config.Username, - Password: config.Password, - BearerToken: config.BearerToken, + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 64901fba20..dd0630387a 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -1100,7 +1100,8 @@ func (r Result) Into(obj runtime.Object) error { return fmt.Errorf("serializer for %s doesn't exist", r.contentType) } if len(r.body) == 0 { - return fmt.Errorf("0-length response") + return fmt.Errorf("0-length response with status code: %d and content type: %s", + r.statusCode, r.contentType) } out, _, err := r.decoder.Decode(r.body, nil, obj) @@ -1195,7 +1196,6 @@ func IsValidPathSegmentPrefix(name string) []string { func ValidatePathSegmentName(name string, prefix bool) []string { if prefix { return IsValidPathSegmentPrefix(name) - } else { - return IsValidPathSegmentName(name) } + return IsValidPathSegmentName(name) } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index 25c1801b67..bd5749dc62 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -103,14 +103,15 @@ func (c *Config) TransportConfig() (*transport.Config, error) { if err != nil { return nil, err } - wt := conf.WrapTransport - if wt != nil { - conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(wt(rt)) - } - } else { - conf.WrapTransport = provider.WrapTransport - } + conf.Wrap(provider.WrapTransport) } return conf, nil } + +// Wrap adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper prior to the +// first API call being made. The provided function is invoked after any +// existing transport wrappers are invoked. 
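A self-contained sketch of how this style of wrapper chaining composes; wrapperFunc, chain, and headerRT below are local stand-ins, not the real k8s.io/client-go/transport helpers, but they follow the same ordering: the existing wrappers are applied first and the newly added function wraps their result.

package main

import (
	"fmt"
	"net/http"
)

// wrapperFunc mirrors the shape of a transport wrapper: it decorates an
// http.RoundTripper and returns the decorated one.
type wrapperFunc func(http.RoundTripper) http.RoundTripper

// chain applies the existing wrapper first and then fn on top, matching
// the "invoked after any existing transport wrappers" ordering above.
func chain(existing, fn wrapperFunc) wrapperFunc {
	return func(rt http.RoundTripper) http.RoundTripper {
		if existing != nil {
			rt = existing(rt)
		}
		if fn != nil {
			rt = fn(rt)
		}
		return rt
	}
}

// headerRT sets a fixed header before delegating to the wrapped transport.
type headerRT struct {
	next  http.RoundTripper
	key   string
	value string
}

func (h *headerRT) RoundTrip(req *http.Request) (*http.Response, error) {
	req = req.Clone(req.Context()) // never mutate the caller's request
	req.Header.Set(h.key, h.value)
	return h.next.RoundTrip(req)
}

func main() {
	var wrap wrapperFunc
	wrap = chain(wrap, func(rt http.RoundTripper) http.RoundTripper {
		return &headerRT{next: rt, key: "X-Outer", value: "1"}
	})
	wrap = chain(wrap, func(rt http.RoundTripper) http.RoundTripper {
		return &headerRT{next: rt, key: "Authorization", value: "Bearer example"}
	})
	rt := wrap(http.DefaultTransport)
	fmt.Printf("%T\n", rt) // the wrapper added last ends up outermost
}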
+func (c *Config) Wrap(fn transport.WrapperFunc) { + c.WrapTransport = transport.Wrappers(c.WrapTransport, fn) +} diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go index 1620bbcf81..2537a2b4e2 100644 --- a/vendor/k8s.io/client-go/restmapper/category_expansion.go +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/discovery" ) -// CategoryExpander maps category strings to GroupResouces. +// CategoryExpander maps category strings to GroupResources. // Categories are classification or 'tag' of a group of resources. type CategoryExpander interface { Expand(category string) ([]schema.GroupResource, bool) diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go index 84491f4c5d..f8d7080d49 100644 --- a/vendor/k8s.io/client-go/restmapper/discovery.go +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -145,27 +145,26 @@ func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper // GetAPIGroupResources uses the provided discovery client to gather // discovery information and populate a slice of APIGroupResources. func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { - apiGroups, err := cl.ServerGroups() - if err != nil { - if apiGroups == nil || len(apiGroups.Groups) == 0 { - return nil, err - } + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err // TODO track the errors and update callers to handle partial errors. } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + var result []*APIGroupResources - for _, group := range apiGroups.Groups { + for _, group := range gs { groupResources := &APIGroupResources{ - Group: group, + Group: *group, VersionedResources: make(map[string][]metav1.APIResource), } for _, version := range group.Versions { - resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion) - if err != nil { - // continue as best we can - // TODO track the errors and update callers to handle partial errors. - if resources == nil || len(resources.APIResources) == 0 { - continue - } + resources, ok := rsm[version.GroupVersion] + if !ok { + continue } groupResources.VersionedResources[version.Version] = resources.APIResources } diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go deleted file mode 100644 index 739fd3509c..0000000000 --- a/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go +++ /dev/null @@ -1,94 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions indirect and printableValue -//are exported as public functions. -package template - -import ( - "fmt" - "reflect" -) - -var Indirect = indirect -var PrintableValue = printableValue - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. 
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. - return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go deleted file mode 100644 index 27a008b0a7..0000000000 --- a/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go +++ /dev/null @@ -1,599 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions eq, ge, gt, le, lt, and ne -//are exported as public functions. -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -var Equal = eq -var GreaterEqual = ge -var Greater = gt -var LessEqual = le -var Less = lt -var NotEqual = ne - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. 
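The contract in the comment above is the same one the standard library's text/template.FuncMap enforces, from which this forked copy was taken. A short runnable example of a two-result function whose non-nil error would abort execution:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// Allowed shapes: one result, or two results where the second is an error.
		"div": func(a, b int) (int, error) {
			if b == 0 {
				return 0, fmt.Errorf("division by zero")
			}
			return a / b, nil
		},
	}
	t := template.Must(template.New("t").Funcs(funcs).Parse("{{div 10 2}}\n"))
	_ = t.Execute(os.Stdout, nil) // prints 5; {{div 10 0}} would make Execute return the error
}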
-type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// AddFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. -func findFunction(name string) (reflect.Value, bool) { - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. 
-func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. - -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. 
- -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... -func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. 
- lessThan, err := lt(arg1, arg2) - if lessThan || err != nil { - return lessThan, err - } - return eq(arg1, arg2) -} - -// gt evaluates the comparison a > b. -func gt(arg1, arg2 interface{}) (bool, error) { - // > is the inverse of <=. - lessOrEqual, err := le(arg1, arg2) - if err != nil { - return false, err - } - return !lessOrEqual, nil -} - -// ge evaluates the comparison a >= b. -func ge(arg1, arg2 interface{}) (bool, error) { - // >= is the inverse of <. - lessThan, err := lt(arg1, arg2) - if err != nil { - return false, err - } - return !lessThan, nil -} - -// HTML escaping. - -var ( - htmlQuot = []byte(""") // shorter than """ - htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 - htmlAmp = []byte("&") - htmlLt = []byte("<") - htmlGt = []byte(">") -) - -// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. -func HTMLEscape(w io.Writer, b []byte) { - last := 0 - for i, c := range b { - var html []byte - switch c { - case '"': - html = htmlQuot - case '\'': - html = htmlApos - case '&': - html = htmlAmp - case '<': - html = htmlLt - case '>': - html = htmlGt - default: - continue - } - w.Write(b[last:i]) - w.Write(html) - last = i + 1 - } - w.Write(b[last:]) -} - -// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. -func HTMLEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexAny(s, `'"&<>`) < 0 { - return s - } - var b bytes.Buffer - HTMLEscape(&b, []byte(s)) - return b.String() -} - -// HTMLEscaper returns the escaped HTML equivalent of the textual -// representation of its arguments. -func HTMLEscaper(args ...interface{}) string { - return HTMLEscapeString(evalArgs(args)) -} - -// JavaScript escaping. - -var ( - jsLowUni = []byte(`\u00`) - hex = []byte("0123456789ABCDEF") - - jsBackslash = []byte(`\\`) - jsApos = []byte(`\'`) - jsQuot = []byte(`\"`) - jsLt = []byte(`\x3C`) - jsGt = []byte(`\x3E`) -) - -// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. -func JSEscape(w io.Writer, b []byte) { - last := 0 - for i := 0; i < len(b); i++ { - c := b[i] - - if !jsIsSpecial(rune(c)) { - // fast path: nothing to do - continue - } - w.Write(b[last:i]) - - if c < utf8.RuneSelf { - // Quotes, slashes and angle brackets get quoted. - // Control characters get written as \u00XX. - switch c { - case '\\': - w.Write(jsBackslash) - case '\'': - w.Write(jsApos) - case '"': - w.Write(jsQuot) - case '<': - w.Write(jsLt) - case '>': - w.Write(jsGt) - default: - w.Write(jsLowUni) - t, b := c>>4, c&0x0f - w.Write(hex[t : t+1]) - w.Write(hex[b : b+1]) - } - } else { - // Unicode rune. - r, size := utf8.DecodeRune(b[i:]) - if unicode.IsPrint(r) { - w.Write(b[i : i+size]) - } else { - fmt.Fprintf(w, "\\u%04X", r) - } - i += size - 1 - } - last = i + 1 - } - w.Write(b[last:]) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexFunc(s, jsIsSpecial) < 0 { - return s - } - var b bytes.Buffer - JSEscape(&b, []byte(s)) - return b.String() -} - -func jsIsSpecial(r rune) bool { - switch r { - case '\\', '\'', '"', '<', '>': - return true - } - return r < ' ' || utf8.RuneSelf <= r -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. 
-func JSEscaper(args ...interface{}) string { - return JSEscapeString(evalArgs(args)) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return url.QueryEscape(evalArgs(args)) -} - -// evalArgs formats the list of arguments into a string. It is therefore equivalent to -// fmt.Sprint(args...) -// except that each argument is indirected (if a pointer), as required, -// using the same rules as the default string evaluation during template -// execution. -func evalArgs(args []interface{}) string { - ok := false - var s string - // Fast path for simple common case. - if len(args) == 1 { - s, ok = args[0].(string) - } - if !ok { - for i, arg := range args { - a, ok := printableValue(reflect.ValueOf(arg)) - if ok { - args[i] = a - } // else left fmt do its thing - } - s = fmt.Sprint(args...) - } - return s -} diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS index c607d2aa8c..3e05d309be 100644 --- a/vendor/k8s.io/client-go/tools/auth/OWNERS +++ b/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - sig-auth-authenticators-approvers reviewers: diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS index 727377f9df..a8cd4b432b 100644 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - thockin - lavalamp @@ -22,7 +24,6 @@ reviewers: - erictune - davidopp - pmorie -- kargakis - janetkuo - justinsb - eparis diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 028c75e8e1..b5d392520b 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -28,9 +28,9 @@ import ( // Config contains all the settings for a Controller. type Config struct { - // The queue for your objects; either a FIFO or - // a DeltaFIFO. Your Process() function should accept - // the output of this Queue's Pop() method. + // The queue for your objects - has to be a DeltaFIFO due to + // assumptions in the implementation. Your Process() function + // should accept the output of this Queue's Pop() method. Queue // Something that can list and watch your objects. @@ -285,45 +285,7 @@ func NewInformer( // This will hold the client state, as we know it. clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) - // This will hold incoming changes. Note how we pass clientState in as a - // KeyLister, that way resync operations will result in the correct set - // of update/delete deltas. 
- fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, - RetryOnError: false, - - Process: func(obj interface{}) error { - // from oldest to newest - for _, d := range obj.(Deltas) { - switch d.Type { - case Sync, Added, Updated: - if old, exists, err := clientState.Get(d.Object); err == nil && exists { - if err := clientState.Update(d.Object); err != nil { - return err - } - h.OnUpdate(old, d.Object) - } else { - if err := clientState.Add(d.Object); err != nil { - return err - } - h.OnAdd(d.Object) - } - case Deleted: - if err := clientState.Delete(d.Object); err != nil { - return err - } - h.OnDelete(d.Object) - } - } - return nil - }, - } - return clientState, New(cfg) + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) } // NewIndexerInformer returns a Indexer and a controller for populating the index @@ -352,6 +314,30 @@ func NewIndexerInformer( // This will hold the client state, as we know it. clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState) +} + +// newInformer returns a controller for populating the store while also +// providing event notifications. +// +// Parameters +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// * clientState is the store you want to populate +// +func newInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + clientState Store, +) Controller { // This will hold incoming changes. Note how we pass clientState in as a // KeyLister, that way resync operations will result in the correct set // of update/delete deltas. @@ -390,5 +376,5 @@ func NewIndexerInformer( return nil }, } - return clientState, New(cfg) + return New(cfg) } diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go index f818a293a6..f24eec2541 100644 --- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -466,6 +466,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if f.knownObjects == nil { // Do deletion detection against our own list. + queuedDeletions := 0 for k, oldItem := range f.items { if keys.Has(k) { continue @@ -474,6 +475,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if n := oldItem.Newest(); n != nil { deletedObj = n.Object } + queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { return err } @@ -481,7 +483,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { if !f.populated { f.populated = true - f.initialPopulationCount = len(list) + // While there shouldn't be any queued deletions in the initial + // population of the queue, it's better to be on the safe side. 
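The controller.go hunk above only moves the shared body of NewInformer and NewIndexerInformer into an unexported newInformer helper; the public constructors behave as before. A caller-side sketch, not part of this patch (watchPods and its arguments are hypothetical):

package informerexample

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchPods wires a ListerWatcher and event handlers into NewInformer; the
// returned Store is kept in sync by the controller until stopCh is closed.
func watchPods(cs kubernetes.Interface, stopCh <-chan struct{}) cache.Store {
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store, controller := cache.NewInformer(lw, &corev1.Pod{}, 30*time.Second,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { fmt.Println("pod added") },
			UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("pod updated") },
			DeleteFunc: func(obj interface{}) { fmt.Println("pod deleted") },
		})
	go controller.Run(stopCh)
	return store
}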
+ f.initialPopulationCount = len(list) + queuedDeletions } return nil diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go index b38fe70b95..68d41c8ece 100644 --- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -144,13 +144,13 @@ func (c *ExpirationCache) ListKeys() []string { // Add timestamps an item and inserts it into the cache, overwriting entries // that might exist under the same key. func (c *ExpirationCache) Add(obj interface{}) error { - c.expirationLock.Lock() - defer c.expirationLock.Unlock() - key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + c.cacheStorage.Add(key, ×tampedEntry{obj, c.clock.Now()}) return nil } @@ -163,12 +163,12 @@ func (c *ExpirationCache) Update(obj interface{}) error { // Delete removes an item from the cache. func (c *ExpirationCache) Delete(obj interface{}) error { - c.expirationLock.Lock() - defer c.expirationLock.Unlock() key, err := c.keyFunc(obj) if err != nil { return KeyError{obj, err} } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() c.cacheStorage.Delete(key) return nil } @@ -177,8 +177,6 @@ func (c *ExpirationCache) Delete(obj interface{}) error { // before attempting the replace operation. The replace operation will // delete the contents of the ExpirationCache `c`. func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { - c.expirationLock.Lock() - defer c.expirationLock.Unlock() items := make(map[string]interface{}, len(list)) ts := c.clock.Now() for _, item := range list { @@ -188,6 +186,8 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er } items[key] = ×tampedEntry{item, ts} } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() c.cacheStorage.Replace(items, resourceVersion) return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index 8d71c24749..b59e2eb272 100644 --- a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error { // Update calls the custom Update function if defined func (f *FakeCustomStore) Update(obj interface{}) error { if f.UpdateFunc != nil { - return f.Update(obj) + return f.UpdateFunc(obj) } return nil } diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go index ce377329c7..311ff8c493 100644 --- a/vendor/k8s.io/client-go/tools/cache/listers.go +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -31,7 +31,14 @@ import ( type AppendFunc func(interface{}) func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { + selectAll := selector.Empty() for _, m := range store.List() { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. 
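The fast path added to listers.go in this hunk (and repeated for the namespace-scoped branches below) applies whenever the selector is empty, the common case of callers that list everything and filter in their own code. A small caller-side sketch, not part of this patch:

package listexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// allPods drains a Store through ListAll; with labels.Everything() the new
// fast path appends each object without computing its labels first.
func allPods(store cache.Store) ([]*corev1.Pod, error) {
	var pods []*corev1.Pod
	err := cache.ListAll(store, labels.Everything(), func(obj interface{}) {
		pods = append(pods, obj.(*corev1.Pod))
	})
	return pods, err
}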
+ appendFn(m) + continue + } metadata, err := meta.Accessor(m) if err != nil { return err @@ -44,8 +51,15 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { } func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { + selectAll := selector.Empty() if namespace == metav1.NamespaceAll { for _, m := range indexer.List() { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. + appendFn(m) + continue + } metadata, err := meta.Accessor(m) if err != nil { return err @@ -74,6 +88,12 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec return nil } for _, m := range items { + if selectAll { + // Avoid computing labels of the objects to speed up common flows + // of listing all objects. + appendFn(m) + continue + } metadata, err := meta.Accessor(m) if err != nil { return err diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go index f86791650e..8227b73b69 100644 --- a/vendor/k8s.io/client-go/tools/cache/listwatch.go +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -27,15 +27,25 @@ import ( "k8s.io/client-go/tools/pager" ) -// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. -type ListerWatcher interface { +// Lister is any object that knows how to perform an initial list. +type Lister interface { // List should return a list type object; the Items field will be extracted, and the // ResourceVersion field will be used to start the watch in the right place. List(options metav1.ListOptions) (runtime.Object, error) +} + +// Watcher is any object that knows how to start a watch on a resource. +type Watcher interface { // Watch should begin a watch at the specified version. Watch(options metav1.ListOptions) (watch.Interface, error) } +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. +type ListerWatcher interface { + Lister + Watcher +} + // ListFunc knows how to list resources type ListFunc func(options metav1.ListOptions) (runtime.Object, error) diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 12e2a33422..c43b7fc52d 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -24,10 +24,8 @@ import ( "net" "net/url" "reflect" - "strconv" "strings" "sync" - "sync/atomic" "syscall" "time" @@ -41,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/klog" + "k8s.io/utils/trace" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. @@ -95,17 +94,10 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) } -// reflectorDisambiguator is used to disambiguate started reflectors. -// initialized to an unstable value to ensure meaning isn't attributed to the suffix. 
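Splitting ListerWatcher into the narrower Lister and Watcher interfaces above is backwards compatible: a ListWatch built from closures still satisfies all three. A sketch against the pre-context client-go signatures used by this vendor drop, not part of this patch:

package lwexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// podListWatch builds a ListWatch from closures for pods in one namespace.
func podListWatch(cs kubernetes.Interface, namespace string) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return cs.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return cs.CoreV1().Pods(namespace).Watch(options)
		},
	}
}

// Compile-time assertions: the existing struct satisfies the new interfaces too.
var (
	_ cache.Lister        = &cache.ListWatch{}
	_ cache.Watcher       = &cache.ListWatch{}
	_ cache.ListerWatcher = &cache.ListWatch{}
)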
-var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) - // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) r := &Reflector{ - name: name, - // we need this to be unique per process (some names are still the same) but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + name: name, listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -173,27 +165,55 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. options := metav1.ListOptions{ResourceVersion: "0"} - r.metrics.numberOfLists.Inc() - start := r.clock.Now() - list, err := r.listerWatcher.List(options) - if err != nil { - return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) - } - r.metrics.listDuration.Observe(time.Since(start).Seconds()) - listMetaInterface, err := meta.ListAccessor(list) - if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) - } - resourceVersion = listMetaInterface.GetResourceVersion() - items, err := meta.ExtractList(list) - if err != nil { - return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) - } - r.metrics.numberOfItemsInList.Observe(float64(len(items))) - if err := r.syncWith(items, resourceVersion); err != nil { - return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + + if err := func() error { + initTrace := trace.New("Reflector " + r.name + " ListAndWatch") + defer initTrace.LogIfLong(10 * time.Second) + var list runtime.Object + var err error + listCh := make(chan struct{}, 1) + panicCh := make(chan interface{}, 1) + go func() { + defer func() { + if r := recover(); r != nil { + panicCh <- r + } + }() + list, err = r.listerWatcher.List(options) + close(listCh) + }() + select { + case <-stopCh: + return nil + case r := <-panicCh: + panic(r) + case <-listCh: + } + if err != nil { + return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) + } + initTrace.Step("Objects listed") + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + initTrace.Step("Resource version extracted") + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) + } + initTrace.Step("Objects extracted") + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + initTrace.Step("Resource version updated") + return nil + }(); err != nil { + return err } - r.setLastSyncResourceVersion(resourceVersion) resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) @@ -239,7 +259,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { TimeoutSeconds: &timeoutSeconds, } - r.metrics.numberOfWatches.Inc() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { @@ 
-291,11 +310,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err // Stopping the watcher should be idempotent and if we return from this function there's no way // we're coming back in with the same watch interface. defer w.Stop() - // update metrics - defer func() { - r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) - r.metrics.watchDuration.Observe(time.Since(start).Seconds()) - }() loop: for { @@ -351,7 +365,6 @@ loop: watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { - r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) @@ -370,9 +383,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { r.lastSyncResourceVersionMutex.Lock() defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v - - rv, err := strconv.Atoi(v) - if err == nil { - r.metrics.lastResourceVersion.Set(float64(rv)) - } } diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index e91fc9e955..b2f3dba07c 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/util/clock" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/buffer" "k8s.io/client-go/util/retry" + "k8s.io/utils/buffer" "k8s.io/klog" ) diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 1c201efb62..5cbdd17ed4 100644 --- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, } index := c.indices[indexName] - // need to de-dupe the return list. Since multiple keys are allowed, this can happen. - returnKeySet := sets.String{} - for _, indexKey := range indexKeys { - set := index[indexKey] - for _, key := range set.UnsortedList() { - returnKeySet.Insert(key) + var returnKeySet sets.String + if len(indexKeys) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + returnKeySet = index[indexKeys[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. + returnKeySet = sets.String{} + for _, indexKey := range indexKeys { + for key := range index[indexKey] { + returnKeySet.Insert(key) + } } } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 1391df7021..990a440c66 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -17,6 +17,8 @@ limitations under the License. package api import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime" ) @@ -150,6 +152,25 @@ type AuthProviderConfig struct { Config map[string]string `json:"config,omitempty"` } +var _ fmt.Stringer = new(AuthProviderConfig) +var _ fmt.GoStringer = new(AuthProviderConfig) + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// AuthProviderConfig to prevent accidental leaking via logs. 
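The String and GoString implementations added in this clientcmd hunk make %v and %#v print a redacted form of auth provider and exec credentials instead of the raw values. A usage sketch, not part of this patch:

package redactexample

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd/api"
)

// printAuthProvider shows the sanitized output: the Config map is rendered as
// "--- REDACTED ---" for both the %v (Stringer) and %#v (GoStringer) verbs.
func printAuthProvider() {
	cfg := api.AuthProviderConfig{
		Name:   "oidc",
		Config: map[string]string{"client-secret": "s3cr3t"},
	}
	fmt.Printf("%v\n", cfg)
	fmt.Printf("%#v\n", cfg)
}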
+func (c AuthProviderConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of +// AuthProviderConfig to prevent accidental leaking via logs. +func (c AuthProviderConfig) String() string { + cfg := "" + if c.Config != nil { + cfg = "--- REDACTED ---" + } + return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg) +} + // ExecConfig specifies a command to provide client credentials. The command is exec'd // and outputs structured stdout holding credentials. // @@ -172,6 +193,29 @@ type ExecConfig struct { APIVersion string `json:"apiVersion,omitempty"` } +var _ fmt.Stringer = new(ExecConfig) +var _ fmt.GoStringer = new(ExecConfig) + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// ExecConfig to prevent accidental leaking via logs. +func (c ExecConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig +// to prevent accidental leaking via logs. +func (c ExecConfig) String() string { + var args []string + if len(c.Args) > 0 { + args = []string{"--- REDACTED ---"} + } + env := "[]ExecEnvVar(nil)" + if len(c.Env) > 0 { + env = "[]ExecEnvVar{--- REDACTED ---}" + } + return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) +} + // ExecEnvVar is used for setting environment variables when executing an exec-based // credential plugin. type ExecEnvVar struct { diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index dea229c918..a7b8c1c6e4 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -229,11 +229,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } else if len(configAuthInfo.TokenFile) > 0 { - ts := restclient.NewCachedFileTokenSource(configAuthInfo.TokenFile) - if _, err := ts.Token(); err != nil { + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + if err != nil { return nil, err } - mergedConfig.WrapTransport = restclient.TokenSourceWrapTransport(ts) + mergedConfig.BearerToken = string(tokenBytes) + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 76380db82a..9cc112a280 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -150,7 +150,12 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { // if we got a default namespace, determine whether it was explicit or implicit if raw, err := mergedKubeConfig.RawConfig(); err == nil { - if context := raw.Contexts[raw.CurrentContext]; context != nil && len(context.Namespace) > 0 { + // determine the current context + currentContext := raw.CurrentContext + if config.overrides != nil && len(config.overrides.CurrentContext) > 0 { + currentContext = config.overrides.CurrentContext + } + if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 { return ns, false, nil } } diff --git 
a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS index d9516f8a81..03ec598b32 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - mikedanese - timothysc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go index 2096a5996b..02bdebd1d3 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -89,10 +89,13 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") } - return &LeaderElector{ - config: lec, - clock: clock.RealClock{}, - }, nil + le := LeaderElector{ + config: lec, + clock: clock.RealClock{}, + metrics: globalMetricsFactory.newLeaderMetrics(), + } + le.metrics.leaderOff(le.config.Name) + return &le, nil } type LeaderElectionConfig struct { @@ -118,6 +121,13 @@ type LeaderElectionConfig struct { // WatchDog may be null if its not needed/configured. WatchDog *HealthzAdaptor + // ReleaseOnCancel should be set true if the lock should be released + // when the run context is cancelled. If you set this to true, you must + // ensure all code guarded by this lease has successfully completed + // prior to cancelling the context, or you may have two processes + // simultaneously acting on the critical path. + ReleaseOnCancel bool + // Name is the name of the resource lock for debugging Name string } @@ -152,6 +162,8 @@ type LeaderElector struct { // clock is wrapper around time to allow for less flaky testing clock clock.Clock + metrics leaderMetricsAdapter + // name is the name of the resource lock for debugging name string } @@ -211,6 +223,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { return } le.config.Lock.RecordEvent("became leader") + le.metrics.leaderOn(le.config.Name) klog.Infof("successfully acquired lease %v", desc) cancel() }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) @@ -246,9 +259,32 @@ func (le *LeaderElector) renew(ctx context.Context) { return } le.config.Lock.RecordEvent("stopped leading") + le.metrics.leaderOff(le.config.Name) klog.Infof("failed to renew lease %v: %v", desc, err) cancel() }, le.config.RetryPeriod, ctx.Done()) + + // if we hold the lease, give it up + if le.config.ReleaseOnCancel { + le.release() + } +} + +// release attempts to release the leader lease if we have acquired it. 
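The ReleaseOnCancel flow added above lets a process voluntarily clear HolderIdentity on shutdown, so the next candidate does not have to wait out the full lease. A wiring sketch, not part of this patch (cs, id and the namespace/name are hypothetical), using the leases lock type that this vendor bump also adds in the resourcelock changes below:

package leaderexample

import (
	"context"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runWithLease blocks on leader election until ctx is cancelled; with
// ReleaseOnCancel set, the lease is released on the way out.
func runWithLease(ctx context.Context, cs kubernetes.Interface, id string, onLead func(context.Context)) error {
	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock,
		"capo-system", "controller-leader-election", // hypothetical namespace/name
		cs.CoreV1(), cs.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
	if err != nil {
		return err
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		ReleaseOnCancel: true,
		Name:            "controller-leader-election",
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: onLead,
			OnStoppedLeading: func() { /* shut down workers */ },
		},
	})
	return nil
}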
+func (le *LeaderElector) release() bool { + if !le.IsLeader() { + return true + } + leaderElectionRecord := rl.LeaderElectionRecord{ + LeaderTransitions: le.observedRecord.LeaderTransitions, + } + if err := le.config.Lock.Update(leaderElectionRecord); err != nil { + klog.Errorf("Failed to release lock: %v", err) + return false + } + le.observedRecord = leaderElectionRecord + le.observedTime = le.clock.Now() + return true } // tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, @@ -284,7 +320,8 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { le.observedRecord = *oldLeaderElectionRecord le.observedTime = le.clock.Now() } - if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && + if len(oldLeaderElectionRecord.HolderIdentity) > 0 && + le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && !le.IsLeader() { klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false diff --git a/vendor/k8s.io/client-go/tools/leaderelection/metrics.go b/vendor/k8s.io/client-go/tools/leaderelection/metrics.go new file mode 100644 index 0000000000..65917bf88e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/metrics.go @@ -0,0 +1,109 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "sync" +) + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +type leaderMetricsAdapter interface { + leaderOn(name string) + leaderOff(name string) +} + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type SwitchMetric interface { + On(name string) + Off(name string) +} + +type noopMetric struct{} + +func (noopMetric) On(name string) {} +func (noopMetric) Off(name string) {} + +// defaultLeaderMetrics expects the caller to lock before setting any metrics. +type defaultLeaderMetrics struct { + // leader's value indicates if the current process is the owner of name lease + leader SwitchMetric +} + +func (m *defaultLeaderMetrics) leaderOn(name string) { + if m == nil { + return + } + m.leader.On(name) +} + +func (m *defaultLeaderMetrics) leaderOff(name string) { + if m == nil { + return + } + m.leader.Off(name) +} + +type noMetrics struct{} + +func (noMetrics) leaderOn(name string) {} +func (noMetrics) leaderOff(name string) {} + +// MetricsProvider generates various metrics used by the leader election. 
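The new metrics.go file keeps leader-election metrics pluggable: a provider registered via SetProvider supplies a SwitchMetric that is flipped by leaderOn/leaderOff. A minimal sketch of a custom, log-based provider, not part of this patch:

package leadermetricsexample

import (
	"log"

	"k8s.io/client-go/tools/leaderelection"
)

// logSwitch satisfies SwitchMetric by logging the transitions.
type logSwitch struct{}

func (logSwitch) On(name string)  { log.Printf("acquired leadership for %q", name) }
func (logSwitch) Off(name string) { log.Printf("lost leadership for %q", name) }

// logProvider satisfies MetricsProvider.
type logProvider struct{}

func (logProvider) NewLeaderMetric() leaderelection.SwitchMetric { return logSwitch{} }

func init() {
	// Only the first SetProvider call in the process takes effect.
	leaderelection.SetProvider(logProvider{})
}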
+type MetricsProvider interface { + NewLeaderMetric() SwitchMetric +} + +type noopMetricsProvider struct{} + +func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric { + return noopMetric{} +} + +var globalMetricsFactory = leaderMetricsFactory{ + metricsProvider: noopMetricsProvider{}, +} + +type leaderMetricsFactory struct { + metricsProvider MetricsProvider + + onlyOnce sync.Once +} + +func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) { + f.onlyOnce.Do(func() { + f.metricsProvider = mp + }) +} + +func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter { + mp := f.metricsProvider + if mp == (noopMetricsProvider{}) { + return noMetrics{} + } + return &defaultLeaderMetrics{ + leader: mp.NewLeaderMetric(), + } +} + +// SetProvider sets the metrics provider for all subsequently created work +// queues. Only the first call has an effect. +func SetProvider(metricsProvider MetricsProvider) { + globalMetricsFactory.setProvider(metricsProvider) +} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index c12daad022..785356894f 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -93,6 +93,9 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { // RecordEvent in leader election while adding meta-data func (cml *ConfigMapLock) RecordEvent(s string) { + if cml.LockConfig.EventRecorder == nil { + return + } events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s) cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go index 6f7dcfb0cc..bfe5e8b1bb 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go @@ -88,6 +88,9 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { // RecordEvent in leader election while adding meta-data func (el *EndpointsLock) RecordEvent(s string) { + if el.LockConfig.EventRecorder == nil { + return + } events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s) el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) } diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go index 676fd1d7db..050d41a25f 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go @@ -20,14 +20,16 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/record" ) const ( LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader" EndpointsResourceLock = "endpoints" ConfigMapsResourceLock = "configmaps" + LeasesResourceLock = "leases" ) // LeaderElectionRecord is the record that is stored in the leader election annotation. @@ -35,6 +37,11 @@ const ( // with a random string (e.g. 
UUID) with only slight modification of this code. // TODO(mikedanese): this should potentially be versioned type LeaderElectionRecord struct { + // HolderIdentity is the ID that owns the lease. If empty, no one owns this lease and + // all callers may acquire. Versions of this library prior to Kubernetes 1.14 will not + // attempt to acquire leases with empty identities and will wait for the full lease + // interval to expire before attempting to reacquire. This value is set to empty when + // a client voluntarily steps down. HolderIdentity string `json:"holderIdentity"` LeaseDurationSeconds int `json:"leaseDurationSeconds"` AcquireTime metav1.Time `json:"acquireTime"` @@ -42,11 +49,19 @@ type LeaderElectionRecord struct { LeaderTransitions int `json:"leaderTransitions"` } +// EventRecorder records a change in the ResourceLock. +type EventRecorder interface { + Eventf(obj runtime.Object, eventType, reason, message string, args ...interface{}) +} + // ResourceLockConfig common data that exists across different // resource locks type ResourceLockConfig struct { - Identity string - EventRecorder record.EventRecorder + // Identity is the unique string identifying a lease holder across + // all participants in an election. + Identity string + // EventRecorder is optional. + EventRecorder EventRecorder } // Interface offers a common interface for locking on arbitrary @@ -76,7 +91,7 @@ type Interface interface { } // Manufacture will create a lock of a given type according to the input parameters -func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) { +func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) { switch lockType { case EndpointsResourceLock: return &EndpointsLock{ @@ -84,7 +99,7 @@ func New(lockType string, ns string, name string, client corev1.CoreV1Interface, Namespace: ns, Name: name, }, - Client: client, + Client: coreClient, LockConfig: rlc, }, nil case ConfigMapsResourceLock: @@ -93,7 +108,16 @@ func New(lockType string, ns string, name string, client corev1.CoreV1Interface, Namespace: ns, Name: name, }, - Client: client, + Client: coreClient, + LockConfig: rlc, + }, nil + case LeasesResourceLock: + return &LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Client: coordinationClient, LockConfig: rlc, }, nil default: diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go new file mode 100644 index 0000000000..285f944054 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -0,0 +1,124 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resourcelock + +import ( + "errors" + "fmt" + + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" +) + +type LeaseLock struct { + // LeaseMeta should contain a Name and a Namespace of a + // LeaseMeta object that the LeaderElector will attempt to lead. + LeaseMeta metav1.ObjectMeta + Client coordinationv1client.LeasesGetter + LockConfig ResourceLockConfig + lease *coordinationv1.Lease +} + +// Get returns the election record from a Lease spec +func (ll *LeaseLock) Get() (*LeaderElectionRecord, error) { + var err error + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return LeaseSpecToLeaderElectionRecord(&ll.lease.Spec), nil +} + +// Create attempts to create a Lease +func (ll *LeaseLock) Create(ler LeaderElectionRecord) error { + var err error + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: ll.LeaseMeta.Name, + Namespace: ll.LeaseMeta.Namespace, + }, + Spec: LeaderElectionRecordToLeaseSpec(&ler), + }) + return err +} + +// Update will update an existing Lease spec. +func (ll *LeaseLock) Update(ler LeaderElectionRecord) error { + if ll.lease == nil { + return errors.New("lease not initialized, call get or create first") + } + ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) + var err error + ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease) + return err +} + +// RecordEvent in leader election while adding meta-data +func (ll *LeaseLock) RecordEvent(s string) { + if ll.LockConfig.EventRecorder == nil { + return + } + events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s) + ll.LockConfig.EventRecorder.Eventf(&coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}, corev1.EventTypeNormal, "LeaderElection", events) +} + +// Describe is used to convert details on current resource lock +// into a string +func (ll *LeaseLock) Describe() string { + return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name) +} + +// returns the Identity of the lock +func (ll *LeaseLock) Identity() string { + return ll.LockConfig.Identity +} + +func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord { + holderIdentity := "" + if spec.HolderIdentity != nil { + holderIdentity = *spec.HolderIdentity + } + leaseDurationSeconds := 0 + if spec.LeaseDurationSeconds != nil { + leaseDurationSeconds = int(*spec.LeaseDurationSeconds) + } + leaseTransitions := 0 + if spec.LeaseTransitions != nil { + leaseTransitions = int(*spec.LeaseTransitions) + } + return &LeaderElectionRecord{ + HolderIdentity: holderIdentity, + LeaseDurationSeconds: leaseDurationSeconds, + AcquireTime: metav1.Time{spec.AcquireTime.Time}, + RenewTime: metav1.Time{spec.RenewTime.Time}, + LeaderTransitions: leaseTransitions, + } +} + +func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec { + leaseDurationSeconds := int32(ler.LeaseDurationSeconds) + leaseTransitions := int32(ler.LeaderTransitions) + return coordinationv1.LeaseSpec{ + HolderIdentity: &ler.HolderIdentity, + LeaseDurationSeconds: &leaseDurationSeconds, + AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time}, + RenewTime: &metav1.MicroTime{ler.RenewTime.Time}, + LeaseTransitions: &leaseTransitions, + } +} diff --git 
a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS index ff51798071..f150be5369 100644 --- a/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - wojtek-t - eparis diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS index 4dd54bbce9..6ce73bb5cd 100644 --- a/vendor/k8s.io/client-go/tools/record/OWNERS +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - lavalamp - smarterclayton diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 2ee69589c6..565e72802a 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -21,7 +21,7 @@ import ( "math/rand" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -29,10 +29,8 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record/util" ref "k8s.io/client-go/tools/reference" - - "net/http" - "k8s.io/klog" ) @@ -157,16 +155,6 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela } } -func isKeyNotFoundError(err error) bool { - statusErr, _ := err.(*errors.StatusError) - - if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { - return true - } - - return false -} - // recordEvent attempts to write event to a sink. It returns true if the event // was successfully recorded or discarded, false if it should be retried. // If updateExistingEvent is false, it creates a new event, otherwise it updates @@ -178,7 +166,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv newEvent, err = sink.Patch(event, patch) } // Update can fail because the event may have been removed and it no longer exists. - if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { + if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) { // Making sure that ResourceVersion is empty on creation event.ResourceVersion = "" newEvent, err = sink.Create(event) @@ -260,7 +248,7 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m return } - if !validateEventType(eventtype) { + if !util.ValidateEventType(eventtype) { klog.Errorf("Unsupported event type: '%v'", eventtype) return } @@ -275,14 +263,6 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m }() } -func validateEventType(eventtype string) bool { - switch eventtype { - case v1.EventTypeNormal, v1.EventTypeWarning: - return true - } - return false -} - func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) } diff --git a/vendor/k8s.io/client-go/tools/record/util/util.go b/vendor/k8s.io/client-go/tools/record/util/util.go new file mode 100644 index 0000000000..d1818a8d90 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/util/util.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "net/http" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" +) + +// ValidateEventType checks that eventtype is an expected type of event +func ValidateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +// IsKeyNotFoundError is utility function that checks if an error is not found error +func IsKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS index bf0ba5b9f9..a52176903c 100644 --- a/vendor/k8s.io/client-go/transport/OWNERS +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - smarterclayton - wojtek-t diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 4081c23e7f..5de0a2cb10 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -39,6 +39,11 @@ type Config struct { // Bearer token for authentication BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig @@ -52,7 +57,10 @@ type Config struct { // from TLSClientConfig, Transport, or http.DefaultTransport). The // config may layer other RoundTrippers on top of the returned // RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper + // + // A future release will change this field to an array. Use config.Wrap() + // instead of setting this value directly. + WrapTransport WrapperFunc // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) @@ -80,7 +88,7 @@ func (c *Config) HasBasicAuth() bool { // HasTokenAuth returns whether the configuration has token authentication or not. func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 + return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 } // HasCertAuth returns whether the configuration has certificate authentication or not. @@ -93,6 +101,14 @@ func (c *Config) HasCertCallback() bool { return c.TLS.GetCert != nil } +// Wrap adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper prior to the +// first API call being made. The provided function is invoked after any +// existing transport wrappers are invoked. 
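With the new BearerTokenFile field above, a transport.Config that only points at a token file is still treated as token auth; the actual file re-reading is wired up in round_trippers.go below. A small sketch, not part of this patch:

package tokenexample

import (
	"fmt"

	"k8s.io/client-go/transport"
)

// hasTokenAuth reports true for a config that carries only a token file path.
func hasTokenAuth() {
	cfg := &transport.Config{
		BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
	}
	fmt.Println(cfg.HasTokenAuth()) // true even though BearerToken is empty
}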
+func (c *Config) Wrap(fn WrapperFunc) { + c.WrapTransport = Wrappers(c.WrapTransport, fn) +} + // TLSConfig holds the information needed to set up a TLS transport. type TLSConfig struct { CAFile string // Path of the PEM-encoded server trusted root certificates. diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index da417cf96e..117a9c8c4d 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,6 +22,7 @@ import ( "strings" "time" + "golang.org/x/oauth2" "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -44,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip case config.HasBasicAuth() && config.HasTokenAuth(): return nil, fmt.Errorf("username/password or bearer token may be set, but not both") case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + var err error + rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) + if err != nil { + return nil, err + } case config.HasBasicAuth(): rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) } @@ -265,13 +270,35 @@ func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r type bearerAuthRoundTripper struct { bearer string + source oauth2.TokenSource rt http.RoundTripper } // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} + return &bearerAuthRoundTripper{bearer, nil, rt} +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +// If tokenFile is non-empty, it is periodically read, +// and the last successfully read content is used as the bearer token. +// If tokenFile is non-empty and bearer is empty, the tokenFile is read +// immediately to populate the initial bearer token. 
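Config.Wrap, added above, composes transport wrappers instead of overwriting WrapTransport, so independent layers can be stacked. A sketch with a hypothetical debugging wrapper, not part of this patch:

package wrapexample

import (
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/client-go/transport"
)

// roundTripperFunc adapts an ordinary function to http.RoundTripper.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

// addDebugHeader stacks one more wrapper on top of whatever WrapTransport the
// config already carries; Wrap composes rather than replaces.
func addDebugHeader(cfg *transport.Config) {
	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
			req = utilnet.CloneRequest(req)                      // do not mutate the caller's request
			req.Header.Set("X-Debug-Source", "capo-controller") // hypothetical header
			return rt.RoundTrip(req)
		})
	})
}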
+func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + if len(tokenFile) == 0 { + return &bearerAuthRoundTripper{bearer, nil, rt}, nil + } + source := NewCachedFileTokenSource(tokenFile) + if len(bearer) == 0 { + token, err := source.Token() + if err != nil { + return nil, err + } + bearer = token.AccessToken + } + return &bearerAuthRoundTripper{bearer, source, rt}, nil } func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -280,7 +307,13 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } req = utilnet.CloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + token := rt.bearer + if rt.source != nil { + if refreshedToken, err := rt.source.Token(); err == nil { + token = refreshedToken.AccessToken + } + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return rt.rt.RoundTrip(req) } diff --git a/vendor/k8s.io/client-go/rest/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go similarity index 87% rename from vendor/k8s.io/client-go/rest/token_source.go rename to vendor/k8s.io/client-go/transport/token_source.go index c251b5eb0b..8595df2716 100644 --- a/vendor/k8s.io/client-go/rest/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rest +package transport import ( "fmt" @@ -47,14 +47,14 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt func NewCachedFileTokenSource(path string) oauth2.TokenSource { return &cachingTokenSource{ now: time.Now, - leeway: 1 * time.Minute, + leeway: 10 * time.Second, base: &fileTokenSource{ path: path, - // This period was picked because it is half of the minimum validity - // duration for a token provisioned by they TokenRequest API. This is - // unsophisticated and should induce rotation at a frequency that should - // work with the token volume source. - period: 5 * time.Minute, + // This period was picked because it is half of the duration between when the kubelet + // refreshes a projected service account token and when the original token expires. + // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime. + // This should induce re-reading at a frequency that works with the token volume source. + period: time.Minute, }, } } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index c19739fdfe..2a145c971a 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -17,6 +17,7 @@ limitations under the License. package transport import ( + "context" "crypto/tls" "crypto/x509" "fmt" @@ -167,3 +168,60 @@ func rootCertPool(caData []byte) *x509.CertPool { certPool.AppendCertsFromPEM(caData) return certPool } + +// WrapperFunc wraps an http.RoundTripper when a new transport +// is created for a client, allowing per connection behavior +// to be injected. +type WrapperFunc func(rt http.RoundTripper) http.RoundTripper + +// Wrappers accepts any number of wrappers and returns a wrapper +// function that is the equivalent of calling each of them in order. Nil +// values are ignored, which makes this function convenient for incrementally +// wrapping a function. 
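NewBearerAuthWithRefreshRoundTripper, added above, is exported, so a caller can build an HTTP client whose bearer token follows a projected service-account token file as the kubelet rotates it (the cached source re-reads roughly once a minute per the token_source.go change above). A sketch, not part of this patch:

package refreshexample

import (
	"net/http"

	"k8s.io/client-go/transport"
)

// newRotatingClient returns a client that re-reads the token file periodically
// and keeps using the last successfully read value in between.
func newRotatingClient(tokenFile string) (*http.Client, error) {
	rt, err := transport.NewBearerAuthWithRefreshRoundTripper("", tokenFile, http.DefaultTransport)
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: rt}, nil
}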
+func Wrappers(fns ...WrapperFunc) WrapperFunc { + if len(fns) == 0 { + return nil + } + // optimize the common case of wrapping a possibly nil transport wrapper + // with an additional wrapper + if len(fns) == 2 && fns[0] == nil { + return fns[1] + } + return func(rt http.RoundTripper) http.RoundTripper { + base := rt + for _, fn := range fns { + if fn != nil { + base = fn(base) + } + } + return base + } +} + +// ContextCanceller prevents new requests after the provided context is finished. +// err is returned when the context is closed, allowing the caller to provide a context +// appropriate error. +func ContextCanceller(ctx context.Context, err error) WrapperFunc { + return func(rt http.RoundTripper) http.RoundTripper { + return &contextCanceller{ + ctx: ctx, + rt: rt, + err: err, + } + } +} + +type contextCanceller struct { + ctx context.Context + rt http.RoundTripper + err error +} + +func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) { + select { + case <-b.ctx.Done(): + return nil, b.err + default: + return b.rt.RoundTrip(req) + } +} diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS index 470b7a1c92..3cf0364383 100644 --- a/vendor/k8s.io/client-go/util/cert/OWNERS +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - sig-auth-certificates-approvers reviewers: diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 3429c82cdf..9fd097af5e 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -19,30 +19,24 @@ package cert import ( "bytes" "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" cryptorand "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" - "errors" "fmt" "io/ioutil" - "math" "math/big" "net" "path" "strings" "time" -) -const ( - rsaKeySize = 2048 - duration365d = time.Hour * 24 * 365 + "k8s.io/client-go/util/keyutil" ) +const duration365d = time.Hour * 24 * 365 + // Config contains the basic fields required for creating a certificate type Config struct { CommonName string @@ -59,11 +53,6 @@ type AltNames struct { IPs []net.IP } -// NewPrivateKey creates an RSA private key -func NewPrivateKey() (*rsa.PrivateKey, error) { - return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) -} - // NewSelfSignedCACert creates a CA certificate func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { now := time.Now() @@ -87,58 +76,6 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro return x509.ParseCertificate(certDERBytes) } -// NewSignedCert creates a signed certificate using the given CA certificate and key -func NewSignedCert(cfg Config, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer) (*x509.Certificate, error) { - serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) - if err != nil { - return nil, err - } - if len(cfg.CommonName) == 0 { - return nil, errors.New("must specify a CommonName") - } - if len(cfg.Usages) == 0 { - return nil, errors.New("must specify at least one ExtKeyUsage") - } - - certTmpl := x509.Certificate{ - Subject: pkix.Name{ - CommonName: cfg.CommonName, - Organization: cfg.Organization, - }, - DNSNames: cfg.AltNames.DNSNames, - IPAddresses: cfg.AltNames.IPs, - SerialNumber: serial, - NotBefore: caCert.NotBefore, - NotAfter: time.Now().Add(duration365d).UTC(), - KeyUsage: 
x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: cfg.Usages, - } - certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) - if err != nil { - return nil, err - } - return x509.ParseCertificate(certDERBytes) -} - -// MakeEllipticPrivateKeyPEM creates an ECDSA private key -func MakeEllipticPrivateKeyPEM() ([]byte, error) { - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) - if err != nil { - return nil, err - } - - derBytes, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, err - } - - privateKeyPemBlock := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(privateKeyPemBlock), nil -} - // GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. // Host may be an IP or a DNS name // You may also specify additional subject alt names (either ip or dns names) for the certificate. @@ -244,7 +181,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a // Generate key keyBuffer := bytes.Buffer{} - if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index a57bf09d5e..5efb248948 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -17,11 +17,7 @@ limitations under the License. package cert import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" "crypto/x509" - "encoding/pem" "fmt" "io/ioutil" "os" @@ -73,60 +69,6 @@ func WriteCert(certPath string, data []byte) error { return ioutil.WriteFile(certPath, data, os.FileMode(0644)) } -// WriteKey writes the pem-encoded key data to keyPath. -// The key file will be created with file mode 0600. -// If the key file already exists, it will be overwritten. -// The parent directory of the keyPath will be created as needed with file mode 0755. -func WriteKey(keyPath string, data []byte) error { - if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { - return err - } - return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) -} - -// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it -// can't find one, it will generate a new key and store it there. -func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { - loadedData, err := ioutil.ReadFile(keyPath) - // Call verifyKeyData to ensure the file wasn't empty/corrupt. - if err == nil && verifyKeyData(loadedData) { - return loadedData, false, err - } - if !os.IsNotExist(err) { - return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) - } - - generatedData, err := MakeEllipticPrivateKeyPEM() - if err != nil { - return nil, false, fmt.Errorf("error generating key: %v", err) - } - if err := WriteKey(keyPath, generatedData); err != nil { - return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) - } - return generatedData, true, nil -} - -// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to -// a PEM encoded block or returns an error. 
-func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { - switch t := privateKey.(type) { - case *ecdsa.PrivateKey: - derBytes, err := x509.MarshalECPrivateKey(t) - if err != nil { - return nil, err - } - privateKeyPemBlock := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(privateKeyPemBlock), nil - case *rsa.PrivateKey: - return EncodePrivateKeyPEM(t), nil - default: - return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) - } -} - // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { @@ -154,40 +96,3 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) { } return certs, nil } - -// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. -// Returns an error if the file could not be read or if the private key could not be parsed. -func PrivateKeyFromFile(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - key, err := ParsePrivateKeyPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading private key file %s: %v", file, err) - } - return key, nil -} - -// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. -// Reads public keys from both public and private key files. -func PublicKeysFromFile(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - keys, err := ParsePublicKeysPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading public key file %s: %v", file, err) - } - return keys, nil -} - -// verifyKeyData returns true if the provided data appears to be a valid private key. -func verifyKeyData(data []byte) bool { - if len(data) == 0 { - return false - } - _, err := ParsePrivateKeyPEM(data) - return err == nil -} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go index b99e366519..9185e2e22d 100644 --- a/vendor/k8s.io/client-go/util/cert/pem.go +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -17,136 +17,18 @@ limitations under the License. package cert import ( - "crypto/ecdsa" - "crypto/rsa" "crypto/x509" "encoding/pem" "errors" - "fmt" ) const ( - // ECPrivateKeyBlockType is a possible value for pem.Block.Type. - ECPrivateKeyBlockType = "EC PRIVATE KEY" - // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. - RSAPrivateKeyBlockType = "RSA PRIVATE KEY" - // PrivateKeyBlockType is a possible value for pem.Block.Type. - PrivateKeyBlockType = "PRIVATE KEY" - // PublicKeyBlockType is a possible value for pem.Block.Type. - PublicKeyBlockType = "PUBLIC KEY" // CertificateBlockType is a possible value for pem.Block.Type. CertificateBlockType = "CERTIFICATE" // CertificateRequestBlockType is a possible value for pem.Block.Type. 
CertificateRequestBlockType = "CERTIFICATE REQUEST" ) -// EncodePublicKeyPEM returns PEM-encoded public data -func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { - der, err := x509.MarshalPKIXPublicKey(key) - if err != nil { - return []byte{}, err - } - block := pem.Block{ - Type: PublicKeyBlockType, - Bytes: der, - } - return pem.EncodeToMemory(&block), nil -} - -// EncodePrivateKeyPEM returns PEM-encoded private key data -func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { - block := pem.Block{ - Type: RSAPrivateKeyBlockType, - Bytes: x509.MarshalPKCS1PrivateKey(key), - } - return pem.EncodeToMemory(&block) -} - -// EncodeCertPEM returns PEM-endcoded certificate data -func EncodeCertPEM(cert *x509.Certificate) []byte { - block := pem.Block{ - Type: CertificateBlockType, - Bytes: cert.Raw, - } - return pem.EncodeToMemory(&block) -} - -// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. -// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" -func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { - var privateKeyPemBlock *pem.Block - for { - privateKeyPemBlock, keyData = pem.Decode(keyData) - if privateKeyPemBlock == nil { - break - } - - switch privateKeyPemBlock.Type { - case ECPrivateKeyBlockType: - // ECDSA Private Key in ASN.1 format - if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case RSAPrivateKeyBlockType: - // RSA Private Key in PKCS#1 format - if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case PrivateKeyBlockType: - // RSA or ECDSA Private Key in unencrypted PKCS#8 format - if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - } - - // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks - // originally, only the first PEM block was parsed and expected to be a key block - } - - // we read all the PEM blocks and didn't recognize one - return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") -} - -// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. -// Reads public keys from both public and private key files. 
-func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { - var block *pem.Block - keys := []interface{}{} - for { - // read the next block - block, keyData = pem.Decode(keyData) - if block == nil { - break - } - - // test block against parsing functions - if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseECPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - - // tolerate non-key PEM blocks for backwards compatibility - // originally, only the first PEM block was parsed and expected to be a key block - } - - if len(keys) == 0 { - return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") - } - return keys, nil -} - // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { @@ -177,93 +59,3 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { } return certs, nil } - -// parseRSAPublicKey parses a single RSA public key from the provided data -func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an RSA Public Key - var pubKey *rsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") - } - - return pubKey, nil -} - -// parseRSAPrivateKey parses a single RSA private key from the provided data -func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { - return nil, err - } - } - - // Test if parsed key is an RSA Private Key - var privKey *rsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") - } - - return privKey, nil -} - -// parseECPublicKey parses a single ECDSA public key from the provided data -func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an ECDSA Public Key - var pubKey *ecdsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") - } - - return pubKey, nil -} - -// parseECPrivateKey parses a single ECDSA private key from the provided data -func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, 
err = x509.ParseECPrivateKey(data); err != nil { - return nil, err - } - - // Test if parsed key is an ECDSA Private Key - var privKey *ecdsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") - } - - return privKey, nil -} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index 71d442a62b..b7cb70ea74 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/client-go/util/integer" + "k8s.io/utils/integer" ) type backoffEntry struct { diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go deleted file mode 100644 index 6633ca0d65..0000000000 --- a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go +++ /dev/null @@ -1,517 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" - - "k8s.io/client-go/third_party/forked/golang/template" -) - -type JSONPath struct { - name string - parser *Parser - stack [][]reflect.Value // push and pop values in different scopes - cur []reflect.Value // current scope values - beginRange int - inRange int - endRange int - - allowMissingKeys bool -} - -// New creates a new JSONPath with the given name. -func New(name string) *JSONPath { - return &JSONPath{ - name: name, - beginRange: 0, - inRange: 0, - endRange: 0, - } -} - -// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key -// cannot be located, or simply an empty result. The receiver is returned for chaining. -func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath { - j.allowMissingKeys = allow - return j -} - -// Parse parses the given template and returns an error. -func (j *JSONPath) Parse(text string) error { - var err error - j.parser, err = Parse(j.name, text) - return err -} - -// Execute bounds data into template and writes the result. 
-func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { - fullResults, err := j.FindResults(data) - if err != nil { - return err - } - for ix := range fullResults { - if err := j.PrintResults(wr, fullResults[ix]); err != nil { - return err - } - } - return nil -} - -func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { - if j.parser == nil { - return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) - } - - j.cur = []reflect.Value{reflect.ValueOf(data)} - nodes := j.parser.Root.Nodes - fullResult := [][]reflect.Value{} - for i := 0; i < len(nodes); i++ { - node := nodes[i] - results, err := j.walk(j.cur, node) - if err != nil { - return nil, err - } - - // encounter an end node, break the current block - if j.endRange > 0 && j.endRange <= j.inRange { - j.endRange -= 1 - break - } - // encounter a range node, start a range loop - if j.beginRange > 0 { - j.beginRange -= 1 - j.inRange += 1 - for k, value := range results { - j.parser.Root.Nodes = nodes[i+1:] - if k == len(results)-1 { - j.inRange -= 1 - } - nextResults, err := j.FindResults(value.Interface()) - if err != nil { - return nil, err - } - fullResult = append(fullResult, nextResults...) - } - break - } - fullResult = append(fullResult, results) - } - return fullResult, nil -} - -// PrintResults writes the results into writer -func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { - for i, r := range results { - text, err := j.evalToText(r) - if err != nil { - return err - } - if i != len(results)-1 { - text = append(text, ' ') - } - if _, err = wr.Write(text); err != nil { - return err - } - } - return nil -} - -// walk visits tree rooted at the given node in DFS order -func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { - switch node := node.(type) { - case *ListNode: - return j.evalList(value, node) - case *TextNode: - return []reflect.Value{reflect.ValueOf(node.Text)}, nil - case *FieldNode: - return j.evalField(value, node) - case *ArrayNode: - return j.evalArray(value, node) - case *FilterNode: - return j.evalFilter(value, node) - case *IntNode: - return j.evalInt(value, node) - case *BoolNode: - return j.evalBool(value, node) - case *FloatNode: - return j.evalFloat(value, node) - case *WildcardNode: - return j.evalWildcard(value, node) - case *RecursiveNode: - return j.evalRecursive(value, node) - case *UnionNode: - return j.evalUnion(value, node) - case *IdentifierNode: - return j.evalIdentifier(value, node) - default: - return value, fmt.Errorf("unexpected Node %v", node) - } -} - -// evalInt evaluates IntNode -func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalFloat evaluates FloatNode -func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalBool evaluates BoolNode -func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalList evaluates ListNode -func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { - var err error - 
curValue := value - for _, node := range node.Nodes { - curValue, err = j.walk(curValue, node) - if err != nil { - return curValue, err - } - } - return curValue, nil -} - -// evalIdentifier evaluates IdentifierNode -func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { - results := []reflect.Value{} - switch node.Name { - case "range": - j.stack = append(j.stack, j.cur) - j.beginRange += 1 - results = input - case "end": - if j.endRange < j.inRange { // inside a loop, break the current block - j.endRange += 1 - break - } - // the loop is about to end, pop value and continue the following execution - if len(j.stack) > 0 { - j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1] - } else { - return results, fmt.Errorf("not in range, nothing to end") - } - default: - return input, fmt.Errorf("unrecognized identifier %v", node.Name) - } - return results, nil -} - -// evalArray evaluates ArrayNode -func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - - value, isNil := template.Indirect(value) - if isNil { - continue - } - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice", value.Type()) - } - params := node.Params - if !params[0].Known { - params[0].Value = 0 - } - if params[0].Value < 0 { - params[0].Value += value.Len() - } - if !params[1].Known { - params[1].Value = value.Len() - } - - if params[1].Value < 0 { - params[1].Value += value.Len() - } - - sliceLength := value.Len() - if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. - if params[0].Value >= sliceLength || params[0].Value < 0 { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) - } - if params[1].Value > sliceLength || params[1].Value < 0 { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) - } - } - - if !params[2].Known { - value = value.Slice(params[0].Value, params[1].Value) - } else { - value = value.Slice3(params[0].Value, params[1].Value, params[2].Value) - } - for i := 0; i < value.Len(); i++ { - result = append(result, value.Index(i)) - } - } - return result, nil -} - -// evalUnion evaluates UnionNode -func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, listNode := range node.Nodes { - temp, err := j.evalList(input, listNode) - if err != nil { - return input, err - } - result = append(result, temp...) - } - return result, nil -} - -func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { - t := value.Type() - var inlineValue *reflect.Value - for ix := 0; ix < t.NumField(); ix++ { - f := t.Field(ix) - jsonTag := f.Tag.Get("json") - parts := strings.Split(jsonTag, ",") - if len(parts) == 0 { - continue - } - if parts[0] == node.Value { - return value.Field(ix), nil - } - if len(parts[0]) == 0 { - val := value.Field(ix) - inlineValue = &val - } - } - if inlineValue != nil { - if inlineValue.Kind() == reflect.Struct { - // handle 'inline' - match, err := j.findFieldInValue(inlineValue, node) - if err != nil { - return reflect.Value{}, err - } - if match.IsValid() { - return match, nil - } - } - } - return value.FieldByName(node.Value), nil -} - -// evalField evaluates field of struct or key of map. 
-func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { - results := []reflect.Value{} - // If there's no input, there's no output - if len(input) == 0 { - return results, nil - } - for _, value := range input { - var result reflect.Value - value, isNil := template.Indirect(value) - if isNil { - continue - } - - if value.Kind() == reflect.Struct { - var err error - if result, err = j.findFieldInValue(&value, node); err != nil { - return nil, err - } - } else if value.Kind() == reflect.Map { - mapKeyType := value.Type().Key() - nodeValue := reflect.ValueOf(node.Value) - // node value type must be convertible to map key type - if !nodeValue.Type().ConvertibleTo(mapKeyType) { - return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) - } - result = value.MapIndex(nodeValue.Convert(mapKeyType)) - } - if result.IsValid() { - results = append(results, result) - } - } - if len(results) == 0 { - if j.allowMissingKeys { - return results, nil - } - return results, fmt.Errorf("%s is not found", node.Value) - } - return results, nil -} - -// evalWildcard extracts all contents of the given value -func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalRecursive visits the given value recursively and pushes all of them to result -func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - results := []reflect.Value{} - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - if len(results) != 0 { - result = append(result, value) - output, err := j.evalRecursive(results, node) - if err != nil { - return result, err - } - result = append(result, output...) 
- } - } - return result, nil -} - -// evalFilter filters array according to FilterNode -func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, _ = template.Indirect(value) - - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) - } - for i := 0; i < value.Len(); i++ { - temp := []reflect.Value{value.Index(i)} - lefts, err := j.evalList(temp, node.Left) - - //case exists - if node.Operator == "exists" { - if len(lefts) > 0 { - results = append(results, value.Index(i)) - } - continue - } - - if err != nil { - return input, err - } - - var left, right interface{} - switch { - case len(lefts) == 0: - continue - case len(lefts) > 1: - return input, fmt.Errorf("can only compare one element at a time") - } - left = lefts[0].Interface() - - rights, err := j.evalList(temp, node.Right) - if err != nil { - return input, err - } - switch { - case len(rights) == 0: - continue - case len(rights) > 1: - return input, fmt.Errorf("can only compare one element at a time") - } - right = rights[0].Interface() - - pass := false - switch node.Operator { - case "<": - pass, err = template.Less(left, right) - case ">": - pass, err = template.Greater(left, right) - case "==": - pass, err = template.Equal(left, right) - case "!=": - pass, err = template.NotEqual(left, right) - case "<=": - pass, err = template.LessEqual(left, right) - case ">=": - pass, err = template.GreaterEqual(left, right) - default: - return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) - } - if err != nil { - return results, err - } - if pass { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalToText translates reflect value to corresponding text -func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { - iface, ok := template.PrintableValue(v) - if !ok { - return nil, fmt.Errorf("can't print type %s", v.Type()) - } - var buffer bytes.Buffer - fmt.Fprint(&buffer, iface) - return buffer.Bytes(), nil -} diff --git a/vendor/k8s.io/client-go/util/jsonpath/node.go b/vendor/k8s.io/client-go/util/jsonpath/node.go deleted file mode 100644 index 2f612b188f..0000000000 --- a/vendor/k8s.io/client-go/util/jsonpath/node.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import "fmt" - -// NodeType identifies the type of a parse tree node. 
-type NodeType int - -// Type returns itself and provides an easy default implementation -func (t NodeType) Type() NodeType { - return t -} - -func (t NodeType) String() string { - return NodeTypeName[t] -} - -const ( - NodeText NodeType = iota - NodeArray - NodeList - NodeField - NodeIdentifier - NodeFilter - NodeInt - NodeFloat - NodeWildcard - NodeRecursive - NodeUnion - NodeBool -) - -var NodeTypeName = map[NodeType]string{ - NodeText: "NodeText", - NodeArray: "NodeArray", - NodeList: "NodeList", - NodeField: "NodeField", - NodeIdentifier: "NodeIdentifier", - NodeFilter: "NodeFilter", - NodeInt: "NodeInt", - NodeFloat: "NodeFloat", - NodeWildcard: "NodeWildcard", - NodeRecursive: "NodeRecursive", - NodeUnion: "NodeUnion", - NodeBool: "NodeBool", -} - -type Node interface { - Type() NodeType - String() string -} - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Nodes []Node // The element nodes in lexical order. -} - -func newList() *ListNode { - return &ListNode{NodeType: NodeList} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) String() string { - return l.Type().String() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Text string // The text; may span newlines. -} - -func newText(text string) *TextNode { - return &TextNode{NodeType: NodeText, Text: text} -} - -func (t *TextNode) String() string { - return fmt.Sprintf("%s: %s", t.Type(), t.Text) -} - -// FieldNode holds field of struct -type FieldNode struct { - NodeType - Value string -} - -func newField(value string) *FieldNode { - return &FieldNode{NodeType: NodeField, Value: value} -} - -func (f *FieldNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Value) -} - -// IdentifierNode holds an identifier -type IdentifierNode struct { - NodeType - Name string -} - -func newIdentifier(value string) *IdentifierNode { - return &IdentifierNode{ - NodeType: NodeIdentifier, - Name: value, - } -} - -func (f *IdentifierNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Name) -} - -// ParamsEntry holds param information for ArrayNode -type ParamsEntry struct { - Value int - Known bool // whether the value is known when parse it -} - -// ArrayNode holds start, end, step information for array index selection -type ArrayNode struct { - NodeType - Params [3]ParamsEntry // start, end, step -} - -func newArray(params [3]ParamsEntry) *ArrayNode { - return &ArrayNode{ - NodeType: NodeArray, - Params: params, - } -} - -func (a *ArrayNode) String() string { - return fmt.Sprintf("%s: %v", a.Type(), a.Params) -} - -// FilterNode holds operand and operator information for filter -type FilterNode struct { - NodeType - Left *ListNode - Right *ListNode - Operator string -} - -func newFilter(left, right *ListNode, operator string) *FilterNode { - return &FilterNode{ - NodeType: NodeFilter, - Left: left, - Right: right, - Operator: operator, - } -} - -func (f *FilterNode) String() string { - return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) -} - -// IntNode holds integer value -type IntNode struct { - NodeType - Value int -} - -func newInt(num int) *IntNode { - return &IntNode{NodeType: NodeInt, Value: num} -} - -func (i *IntNode) String() string { - return fmt.Sprintf("%s: %d", i.Type(), i.Value) -} - -// FloatNode holds float value -type FloatNode struct { - NodeType - Value float64 -} - -func newFloat(num float64) *FloatNode { - return &FloatNode{NodeType: NodeFloat, Value: num} -} - -func (i 
*FloatNode) String() string { - return fmt.Sprintf("%s: %f", i.Type(), i.Value) -} - -// WildcardNode means a wildcard -type WildcardNode struct { - NodeType -} - -func newWildcard() *WildcardNode { - return &WildcardNode{NodeType: NodeWildcard} -} - -func (i *WildcardNode) String() string { - return i.Type().String() -} - -// RecursiveNode means a recursive descent operator -type RecursiveNode struct { - NodeType -} - -func newRecursive() *RecursiveNode { - return &RecursiveNode{NodeType: NodeRecursive} -} - -func (r *RecursiveNode) String() string { - return r.Type().String() -} - -// UnionNode is union of ListNode -type UnionNode struct { - NodeType - Nodes []*ListNode -} - -func newUnion(nodes []*ListNode) *UnionNode { - return &UnionNode{NodeType: NodeUnion, Nodes: nodes} -} - -func (u *UnionNode) String() string { - return u.Type().String() -} - -// BoolNode holds bool value -type BoolNode struct { - NodeType - Value bool -} - -func newBool(value bool) *BoolNode { - return &BoolNode{NodeType: NodeBool, Value: value} -} - -func (b *BoolNode) String() string { - return fmt.Sprintf("%s: %t", b.Type(), b.Value) -} diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go deleted file mode 100644 index 99b45849c9..0000000000 --- a/vendor/k8s.io/client-go/util/jsonpath/parser.go +++ /dev/null @@ -1,525 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -const eof = -1 - -const ( - leftDelim = "{" - rightDelim = "}" -) - -type Parser struct { - Name string - Root *ListNode - input string - cur *ListNode - pos int - start int - width int -} - -var ( - ErrSyntax = errors.New("invalid syntax") - dictKeyRex = regexp.MustCompile(`^'([^']*)'$`) - sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) -) - -// Parse parsed the given text and return a node Parser. -// If an error is encountered, parsing stops and an empty -// Parser is returned with the error -func Parse(name, text string) (*Parser, error) { - p := NewParser(name) - err := p.Parse(text) - if err != nil { - p = nil - } - return p, err -} - -func NewParser(name string) *Parser { - return &Parser{ - Name: name, - } -} - -// parseAction parsed the expression inside delimiter -func parseAction(name, text string) (*Parser, error) { - p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) - // when error happens, p will be nil, so we need to return here - if err != nil { - return p, err - } - p.Root = p.Root.Nodes[0].(*ListNode) - return p, nil -} - -func (p *Parser) Parse(text string) error { - p.input = text - p.Root = newList() - p.pos = 0 - return p.parseText(p.Root) -} - -// consumeText return the parsed text since last cosumeText -func (p *Parser) consumeText() string { - value := p.input[p.start:p.pos] - p.start = p.pos - return value -} - -// next returns the next rune in the input. 
-func (p *Parser) next() rune { - if p.pos >= len(p.input) { - p.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(p.input[p.pos:]) - p.width = w - p.pos += p.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (p *Parser) peek() rune { - r := p.next() - p.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (p *Parser) backup() { - p.pos -= p.width -} - -func (p *Parser) parseText(cur *ListNode) error { - for { - if strings.HasPrefix(p.input[p.pos:], leftDelim) { - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return p.parseLeftDelim(cur) - } - if p.next() == eof { - break - } - } - // Correctly reached EOF. - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return nil -} - -// parseLeftDelim scans the left delimiter, which is known to be present. -func (p *Parser) parseLeftDelim(cur *ListNode) error { - p.pos += len(leftDelim) - p.consumeText() - newNode := newList() - cur.append(newNode) - cur = newNode - return p.parseInsideAction(cur) -} - -func (p *Parser) parseInsideAction(cur *ListNode) error { - prefixMap := map[string]func(*ListNode) error{ - rightDelim: p.parseRightDelim, - "[?(": p.parseFilter, - "..": p.parseRecursive, - } - for prefix, parseFunc := range prefixMap { - if strings.HasPrefix(p.input[p.pos:], prefix) { - return parseFunc(cur) - } - } - - switch r := p.next(); { - case r == eof || isEndOfLine(r): - return fmt.Errorf("unclosed action") - case r == ' ': - p.consumeText() - case r == '@' || r == '$': //the current object, just pass it - p.consumeText() - case r == '[': - return p.parseArray(cur) - case r == '"' || r == '\'': - return p.parseQuote(cur, r) - case r == '.': - return p.parseField(cur) - case r == '+' || r == '-' || unicode.IsDigit(r): - p.backup() - return p.parseNumber(cur) - case isAlphaNumeric(r): - p.backup() - return p.parseIdentifier(cur) - default: - return fmt.Errorf("unrecognized character in action: %#U", r) - } - return p.parseInsideAction(cur) -} - -// parseRightDelim scans the right delimiter, which is known to be present. -func (p *Parser) parseRightDelim(cur *ListNode) error { - p.pos += len(rightDelim) - p.consumeText() - cur = p.Root - return p.parseText(cur) -} - -// parseIdentifier scans build-in keywords, like "range" "end" -func (p *Parser) parseIdentifier(cur *ListNode) error { - var r rune - for { - r = p.next() - if isTerminator(r) { - p.backup() - break - } - } - value := p.consumeText() - - if isBool(value) { - v, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("can not parse bool '%s': %s", value, err.Error()) - } - - cur.append(newBool(v)) - } else { - cur.append(newIdentifier(value)) - } - - return p.parseInsideAction(cur) -} - -// parseRecursive scans the recursive desent operator .. -func (p *Parser) parseRecursive(cur *ListNode) error { - p.pos += len("..") - p.consumeText() - cur.append(newRecursive()) - if r := p.peek(); isAlphaNumeric(r) { - return p.parseField(cur) - } - return p.parseInsideAction(cur) -} - -// parseNumber scans number -func (p *Parser) parseNumber(cur *ListNode) error { - r := p.peek() - if r == '+' || r == '-' { - r = p.next() - } - for { - r = p.next() - if r != '.' 
&& !unicode.IsDigit(r) { - p.backup() - break - } - } - value := p.consumeText() - i, err := strconv.Atoi(value) - if err == nil { - cur.append(newInt(i)) - return p.parseInsideAction(cur) - } - d, err := strconv.ParseFloat(value, 64) - if err == nil { - cur.append(newFloat(d)) - return p.parseInsideAction(cur) - } - return fmt.Errorf("cannot parse number %s", value) -} - -// parseArray scans array index selection -func (p *Parser) parseArray(cur *ListNode) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated array") - case ']': - break Loop - } - } - text := p.consumeText() - text = text[1 : len(text)-1] - if text == "*" { - text = ":" - } - - //union operator - strs := strings.Split(text, ",") - if len(strs) > 1 { - union := []*ListNode{} - for _, str := range strs { - parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) - if err != nil { - return err - } - union = append(union, parser.Root) - } - cur.append(newUnion(union)) - return p.parseInsideAction(cur) - } - - // dict key - value := dictKeyRex.FindStringSubmatch(text) - if value != nil { - parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) - if err != nil { - return err - } - for _, node := range parser.Root.Nodes { - cur.append(node) - } - return p.parseInsideAction(cur) - } - - //slice operator - value = sliceOperatorRex.FindStringSubmatch(text) - if value == nil { - return fmt.Errorf("invalid array index %s", text) - } - value = value[1:] - params := [3]ParamsEntry{} - for i := 0; i < 3; i++ { - if value[i] != "" { - if i > 0 { - value[i] = value[i][1:] - } - if i > 0 && value[i] == "" { - params[i].Known = false - } else { - var err error - params[i].Known = true - params[i].Value, err = strconv.Atoi(value[i]) - if err != nil { - return fmt.Errorf("array index %s is not a number", value[i]) - } - } - } else { - if i == 1 { - params[i].Known = true - params[i].Value = params[0].Value + 1 - } else { - params[i].Known = false - params[i].Value = 0 - } - } - } - cur.append(newArray(params)) - return p.parseInsideAction(cur) -} - -// parseFilter scans filter inside array selection -func (p *Parser) parseFilter(cur *ListNode) error { - p.pos += len("[?(") - p.consumeText() - begin := false - end := false - var pair rune - -Loop: - for { - r := p.next() - switch r { - case eof, '\n': - return fmt.Errorf("unterminated filter") - case '"', '\'': - if begin == false { - //save the paired rune - begin = true - pair = r - continue - } - //only add when met paired rune - if p.input[p.pos-2] != '\\' && r == pair { - end = true - } - case ')': - //in rightParser below quotes only appear zero or once - //and must be paired at the beginning and end - if begin == end { - break Loop - } - } - } - if p.next() != ']' { - return fmt.Errorf("unclosed array expect ]") - } - reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) - text := p.consumeText() - text = text[:len(text)-2] - value := reg.FindStringSubmatch(text) - if value == nil { - parser, err := parseAction("text", text) - if err != nil { - return err - } - cur.append(newFilter(parser.Root, newList(), "exists")) - } else { - leftParser, err := parseAction("left", value[1]) - if err != nil { - return err - } - rightParser, err := parseAction("right", value[3]) - if err != nil { - return err - } - cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) - } - return p.parseInsideAction(cur) -} - -// parseQuote unquotes string inside double or single quote -func (p *Parser) parseQuote(cur 
*ListNode, end rune) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated quoted string") - case end: - //if it's not escape break the Loop - if p.input[p.pos-2] != '\\' { - break Loop - } - } - } - value := p.consumeText() - s, err := UnquoteExtend(value) - if err != nil { - return fmt.Errorf("unquote string %s error %v", value, err) - } - cur.append(newText(s)) - return p.parseInsideAction(cur) -} - -// parseField scans a field until a terminator -func (p *Parser) parseField(cur *ListNode) error { - p.consumeText() - for p.advance() { - } - value := p.consumeText() - if value == "*" { - cur.append(newWildcard()) - } else { - cur.append(newField(strings.Replace(value, "\\", "", -1))) - } - return p.parseInsideAction(cur) -} - -// advance scans until next non-escaped terminator -func (p *Parser) advance() bool { - r := p.next() - if r == '\\' { - p.next() - } else if isTerminator(r) { - p.backup() - return false - } - return true -} - -// isTerminator reports whether the input is at valid termination character to appear after an identifier. -func isTerminator(r rune) bool { - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '[', ']', '$', '@', '{', '}': - return true - } - return false -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} - -// isBool reports whether s is a boolean value. -func isBool(s string) bool { - return s == "true" || s == "false" -} - -//UnquoteExtend is almost same as strconv.Unquote(), but it support parse single quotes as a string -func UnquoteExtend(s string) (string, error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' && quote != '\'' { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) { - return s, nil - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - c, multibyte, ss, err := strconv.UnquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - } - return string(buf), nil -} - -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS new file mode 100644 index 0000000000..470b7a1c92 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go new file mode 100644 index 0000000000..83c2c6254e --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/key.go @@ -0,0 +1,323 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package keyutil contains utilities for managing public/private key pairs. +package keyutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. + PublicKeyBlockType = "PUBLIC KEY" +) + +// MakeEllipticPrivateKeyPEM creates an ECDSA private key +func MakeEllipticPrivateKeyPEM() ([]byte, error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, err + } + + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, err + } + + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil +} + +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. +// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + // Call verifyKeyData to ensure the file wasn't empty/corrupt. + if err == nil && verifyKeyData(loadedData) { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to +// a PEM encoded block or returns an error. 
+func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { + switch t := privateKey.(type) { + case *ecdsa.PrivateKey: + derBytes, err := x509.MarshalECPrivateKey(t) + if err != nil { + return nil, err + } + block := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(block), nil + case *rsa.PrivateKey: + block := &pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(t), + } + return pem.EncodeToMemory(block), nil + default: + return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + } +} + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. +func PrivateKeyFromFile(file string) (interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading private key file %s: %v", file, err) + } + return key, nil +} + +// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. +// Reads public keys from both public and private key files. +func PublicKeysFromFile(file string) ([]interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + keys, err := ParsePublicKeysPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading public key file %s: %v", file, err) + } + return keys, nil +} + +// verifyKeyData returns true if the provided data appears to be a valid private key. +func verifyKeyData(data []byte) bool { + if len(data) == 0 { + return false + } + _, err := ParsePrivateKeyPEM(data) + return err == nil +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. +// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. +// Reads public keys from both public and private key files. 
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { + var block *pem.Block + keys := []interface{}{} + for { + // read the next block + block, keyData = pem.Decode(keyData) + if block == nil { + break + } + + // test block against parsing functions + if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseECPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + + // tolerate non-key PEM blocks for backwards compatibility + // originally, only the first PEM block was parsed and expected to be a key block + } + + if len(keys) == 0 { + return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") + } + return keys, nil +} + +// parseRSAPublicKey parses a single RSA public key from the provided data +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an RSA Public Key + var pubKey *rsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") + } + + return pubKey, nil +} + +// parseRSAPrivateKey parses a single RSA private key from the provided data +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { + return nil, err + } + } + + // Test if parsed key is an RSA Private Key + var privKey *rsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") + } + + return privKey, nil +} + +// parseECPublicKey parses a single ECDSA public key from the provided data +func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an ECDSA Public Key + var pubKey *ecdsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") + } + + return pubKey, nil +} + +// parseECPrivateKey parses a single ECDSA private key from the provided data +func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { + return nil, err + } + + // Test if parsed key is an ECDSA Private Key + var privKey *ecdsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") + } + + return privKey, nil +} diff --git 
a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS index a4c1c2d4f8..dec3e88d63 100644 --- a/vendor/k8s.io/client-go/util/retry/OWNERS +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -1,2 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + reviewers: - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index a5bed29e00..71bb6322e0 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -35,7 +35,7 @@ type RateLimiter interface { } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has -// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential +// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential func DefaultControllerRateLimiter() RateLimiter { return NewMaxOfRateLimiter( NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), @@ -62,7 +62,7 @@ func (r *BucketRateLimiter) NumRequeues(item interface{}) int { func (r *BucketRateLimiter) Forget(item interface{}) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*10^ limit +// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller type ItemExponentialFailureRateLimiter struct { failuresLock sync.Mutex diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index a37177425d..bd654bf311 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -43,12 +43,13 @@ func NewNamedDelayingQueue(name string) DelayingInterface { func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { ret := &delayingType{ - Interface: NewNamed(name), - clock: clock, - heartbeat: clock.NewTicker(maxWait), - stopCh: make(chan struct{}), - waitingForAddCh: make(chan *waitFor, 1000), - metrics: newRetryMetrics(name), + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.NewTicker(maxWait), + stopCh: make(chan struct{}), + waitingForAddCh: make(chan *waitFor, 1000), + metrics: newRetryMetrics(name), + deprecatedMetrics: newDeprecatedRetryMetrics(name), } go ret.waitingLoop() @@ -73,7 +74,8 @@ type delayingType struct { waitingForAddCh chan *waitFor // metrics counts the number of retries - metrics retryMetrics + metrics retryMetrics + deprecatedMetrics retryMetrics } // waitFor holds the data to add and the time it should be added @@ -146,6 +148,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { } q.metrics.retry() + q.deprecatedMetrics.retry() // immediately add things with no delay if duration <= 0 { diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go index d4c03d8378..be23ddd05f 100644 --- a/vendor/k8s.io/client-go/util/workqueue/metrics.go +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -57,6 +57,11 @@ type SummaryMetric interface { Observe(float64) } +// HistogramMetric counts individual observations. 
+type HistogramMetric interface { + Observe(float64) +} + type noopMetric struct{} func (noopMetric) Inc() {} @@ -73,15 +78,23 @@ type defaultQueueMetrics struct { // total number of adds handled by a workqueue adds CounterMetric // how long an item stays in a workqueue - latency SummaryMetric + latency HistogramMetric // how long processing an item from a workqueue takes - workDuration SummaryMetric + workDuration HistogramMetric addTimes map[t]time.Time processingStartTimes map[t]time.Time // how long have current threads been working? unfinishedWorkSeconds SettableGaugeMetric longestRunningProcessor SettableGaugeMetric + + // TODO(danielqsj): Remove the following metrics, they are deprecated + deprecatedDepth GaugeMetric + deprecatedAdds CounterMetric + deprecatedLatency SummaryMetric + deprecatedWorkDuration SummaryMetric + deprecatedUnfinishedWorkSeconds SettableGaugeMetric + deprecatedLongestRunningProcessor SettableGaugeMetric } func (m *defaultQueueMetrics) add(item t) { @@ -90,7 +103,9 @@ func (m *defaultQueueMetrics) add(item t) { } m.adds.Inc() + m.deprecatedAdds.Inc() m.depth.Inc() + m.deprecatedDepth.Inc() if _, exists := m.addTimes[item]; !exists { m.addTimes[item] = m.clock.Now() } @@ -102,9 +117,11 @@ func (m *defaultQueueMetrics) get(item t) { } m.depth.Dec() + m.deprecatedDepth.Dec() m.processingStartTimes[item] = m.clock.Now() if startTime, exists := m.addTimes[item]; exists { - m.latency.Observe(m.sinceInMicroseconds(startTime)) + m.latency.Observe(m.sinceInSeconds(startTime)) + m.deprecatedLatency.Observe(m.sinceInMicroseconds(startTime)) delete(m.addTimes, item) } } @@ -115,7 +132,8 @@ func (m *defaultQueueMetrics) done(item t) { } if startTime, exists := m.processingStartTimes[item]; exists { - m.workDuration.Observe(m.sinceInMicroseconds(startTime)) + m.workDuration.Observe(m.sinceInSeconds(startTime)) + m.deprecatedWorkDuration.Observe(m.sinceInMicroseconds(startTime)) delete(m.processingStartTimes, item) } } @@ -135,7 +153,9 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() { // Convert to seconds; microseconds is unhelpfully granular for this. total /= 1000000 m.unfinishedWorkSeconds.Set(total) - m.longestRunningProcessor.Set(oldest) // in microseconds. + m.deprecatedUnfinishedWorkSeconds.Set(total) + m.longestRunningProcessor.Set(oldest / 1000000) + m.deprecatedLongestRunningProcessor.Set(oldest) // in microseconds. } type noMetrics struct{} @@ -150,6 +170,11 @@ func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 { return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) } +// Gets the time since the specified start in seconds. 
+func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 { + return m.clock.Since(start).Seconds() +} + type retryMetrics interface { retry() } @@ -170,11 +195,18 @@ func (m *defaultRetryMetrics) retry() { type MetricsProvider interface { NewDepthMetric(name string) GaugeMetric NewAddsMetric(name string) CounterMetric - NewLatencyMetric(name string) SummaryMetric - NewWorkDurationMetric(name string) SummaryMetric + NewLatencyMetric(name string) HistogramMetric + NewWorkDurationMetric(name string) HistogramMetric NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric - NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric + NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric NewRetriesMetric(name string) CounterMetric + NewDeprecatedDepthMetric(name string) GaugeMetric + NewDeprecatedAddsMetric(name string) CounterMetric + NewDeprecatedLatencyMetric(name string) SummaryMetric + NewDeprecatedWorkDurationMetric(name string) SummaryMetric + NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric + NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric + NewDeprecatedRetriesMetric(name string) CounterMetric } type noopMetricsProvider struct{} @@ -187,11 +219,11 @@ func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric { +func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric { return noopMetric{} } -func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric { +func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric { return noopMetric{} } @@ -199,7 +231,7 @@ func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) Settabl return noopMetric{} } -func (_ noopMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { +func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric { return noopMetric{} } @@ -207,6 +239,34 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { return noopMetric{} } +func (_ noopMetricsProvider) NewDeprecatedDepthMetric(name string) GaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedAddsMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedLatencyMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedWorkDurationMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) SettableGaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewDeprecatedRetriesMetric(name string) CounterMetric { + return noopMetric{} +} + var globalMetricsFactory = queueMetricsFactory{ metricsProvider: noopMetricsProvider{}, } @@ -229,15 +289,21 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu return noMetrics{} } return &defaultQueueMetrics{ - clock: clock, - depth: mp.NewDepthMetric(name), - adds: mp.NewAddsMetric(name), - latency: mp.NewLatencyMetric(name), - workDuration: mp.NewWorkDurationMetric(name), - unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), - 
longestRunningProcessor: mp.NewLongestRunningProcessorMicrosecondsMetric(name), - addTimes: map[t]time.Time{}, - processingStartTimes: map[t]time.Time{}, + clock: clock, + depth: mp.NewDepthMetric(name), + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + deprecatedDepth: mp.NewDeprecatedDepthMetric(name), + deprecatedAdds: mp.NewDeprecatedAddsMetric(name), + deprecatedLatency: mp.NewDeprecatedLatencyMetric(name), + deprecatedWorkDuration: mp.NewDeprecatedWorkDurationMetric(name), + deprecatedUnfinishedWorkSeconds: mp.NewDeprecatedUnfinishedWorkSecondsMetric(name), + deprecatedLongestRunningProcessor: mp.NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, } } @@ -251,6 +317,16 @@ func newRetryMetrics(name string) retryMetrics { } } +func newDeprecatedRetryMetrics(name string) retryMetrics { + var ret *defaultRetryMetrics + if len(name) == 0 { + return ret + } + return &defaultRetryMetrics{ + retries: globalMetricsFactory.metricsProvider.NewDeprecatedRetriesMetric(name), + } +} + // SetProvider sets the metrics provider for all subsequently created work // queues. Only the first call has an effect. func SetProvider(metricsProvider MetricsProvider) { diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go similarity index 100% rename from vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go rename to vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS index 0c408a1aa9..62866d0b19 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - lavalamp - wojtek-t diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/README.md b/vendor/k8s.io/code-generator/cmd/client-gen/README.md index d1d67abdf9..092a61151c 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/README.md +++ b/vendor/k8s.io/code-generator/cmd/client-gen/README.md @@ -1,4 +1,4 @@ -See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/generating-clientset.md) +See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/sig-api-machinery/generating-clientset.md) [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/staging/src/k8s.io/code-generator/client-gen/README.md?pixel)]() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index ee6ebbcf09..18980744f0 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -27,6 +27,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -101,7 +102,7 @@ func NameSystems() namer.NameSystems { "publicPlural": 
publicPluralNamer, "privatePlural": privatePluralNamer, "allLowercasePlural": lowercaseNamer, - "resource": NewTagOverrideNamer("resourceName", lowercaseNamer), + "resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer), } } @@ -400,27 +401,3 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat return generator.Packages(packageList) } - -// tagOverrideNamer is a namer which pulls names from a given tag, if specified, -// and otherwise falls back to a different namer. -type tagOverrideNamer struct { - tagName string - fallback namer.Namer -} - -func (n *tagOverrideNamer) Name(t *types.Type) string { - if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { - return nameOverride - } - - return n.fallback.Name(t) -} - -// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as -// the name, or falls back to another Namer if the tag is not present. -func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { - return &tagOverrideNamer{ - tagName: tagName, - fallback: fallback, - } -} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index 61b3334f40..80e4361b77 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -102,10 +102,6 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr } sw.Do(clientsetInterfaceImplTemplate, m) - // don't generated the default method if generating internalversion clientset - if group.IsDefaultVersion && group.Version != "" { - sw.Do(clientsetInterfaceDefaultVersionImpl, m) - } } return sw.Error() diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index 6fdb29a94a..a1e67dcbdf 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -88,10 +88,6 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr sw.Do(clientsetTemplate, m) for _, g := range allGroups { sw.Do(clientsetInterfaceImplTemplate, g) - // don't generated the default method if generating internalversion clientset - if g.IsDefaultVersion && g.Version != "" { - sw.Do(clientsetInterfaceDefaultVersionImpl, g) - } } sw.Do(getDiscoveryTemplate, m) sw.Do(newClientsetForConfigTemplate, m) @@ -105,9 +101,7 @@ var clientsetInterface = ` type Interface interface { Discovery() $.DiscoveryInterface|raw$ $range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface - $if .IsDefaultVersion$// Deprecated: please explicitly pick a version if possible. - $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface - $end$$end$ + $end$ } ` @@ -128,14 +122,6 @@ func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.V } ` -var clientsetInterfaceDefaultVersionImpl = ` -// Deprecated: $.GroupGoName$ retrieves the default version of $.GroupGoName$Client. -// Please explicitly pick a version. 
-func (c *Clientset) $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface { - return c.$.LowerCaseGroupGoName$$.Version$ -} -` - var getDiscoveryTemplate = ` // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go index 33e6ac451b..59f2fd4449 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -73,7 +73,7 @@ func (a sortableSliceOfVersions) Less(i, j int) bool { } // Determine the default version among versions. If a user calls a group client -// without specifying the version (e.g., c.Core(), instead of c.CoreV1()), the +// without specifying the version (e.g., c.CoreV1(), instead of c.CoreV1()), the // default version will be returned. func defaultVersion(versions []PackageVersion) Version { var versionStrings []string @@ -88,14 +88,12 @@ func defaultVersion(versions []PackageVersion) Version { func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo { var groupVersionPackages []GroupVersionInfo for _, group := range groups { - defaultVersion := defaultVersion(group.Versions) for _, version := range group.Versions { groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}] groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{ Group: Group(namer.IC(group.Group.NonEmpty())), Version: Version(namer.IC(version.Version.String())), PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()), - IsDefaultVersion: version.Version == defaultVersion && version.Version != "", GroupGoName: groupGoName, LowerCaseGroupGoName: namer.IL(groupGoName), }) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go index 17fd6e92a7..7d1606c508 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -62,11 +62,8 @@ type GroupVersions struct { // GroupVersionInfo contains all the info around a group version. type GroupVersionInfo struct { - Group Group - Version Version - // If a user calls a group client without specifying the version (e.g., - // c.Core(), instead of c.CoreV1()), the default version will be returned. - IsDefaultVersion bool + Group Group + Version Version PackageAlias string GroupGoName string LowerCaseGroupGoName string diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index 698baa7db7..215b17bdee 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -14,20 +14,59 @@ See the License for the specific language governing permissions and limitations under the License. */ -// conversion-gen is a tool for auto-generating Conversion functions. +// conversion-gen is a tool for auto-generating functions that convert +// between internal and external types. A general conversion code +// generation task involves three sets of packages: (1) a set of +// packages containing internal types, (2) a single package containing +// the external types, and (3) a single destination package (i.e., +// where the generated conversion functions go, and where the +// developer-authored conversion functions are). 
The packages +// containing the internal types play the role known as "peer +// packages" in the general code-generation framework of Kubernetes. // -// Given a list of input directories, it will scan for "peer" packages and -// generate functions that efficiently convert between same-name types in each -// package. For any pair of types that has a -// `Convert___To____To__ +// for each such pair of types --- both with (pkg1,pkg2) = +// (internal,external) and (pkg1,pkg2) = (external,internal). +// Additionally: if the destination package does not contain one in a +// non-generated file then a function named +// Convert___To__ +// is also generated and it simply calls the `autoConvert...` +// function. The generated conversion functions use standard value +// assignment wherever possible. For compound types, the generated +// conversion functions call the `Convert...` functions for the +// subsidiary types. Thus developers can override the behavior for +// selected types. For a top-level object type (i.e., the type of an +// object that will be input to an apiserver), for such an override to +// be used by the apiserver the developer-maintained conversion +// functions must also be registered by invoking the +// `AddConversionFuncs` method of the relevant `Scheme` object from +// k8s.io/apimachinery/pkg/runtime. // -// Generation is governed by comment tags in the source. Any package may -// request Conversion generation by including a comment in the file-comments of -// one file, of the form: -// // +k8s:conversion-gen= +// `conversion-gen` will scan its `--input-dirs`, looking at the +// package defined in each of those directories for comment tags that +// define a conversion code generation task. A package requests +// conversion code generation by including one or more comment in the +// package's `doc.go` file (currently anywhere in that file is +// acceptable, but the recommended location is above the `package` +// statement), of the form: +// // +k8s:conversion-gen= +// This introduces a conversion task, for which the destination +// package is the one containing the file with the tag and the tag +// identifies a package containing internal types. If there is also a +// tag of the form +// // +k8s:conversion-gen-external-types= +// then it identifies the package containing the external types; +// otherwise they are in the destination package. +// +// For each conversion code generation task, the full set of internal +// packages (AKA peer packages) consists of the ones specified in the +// `k8s:conversion-gen` tags PLUS any specified in the +// `--base-peer-dirs` and `--extra-peer-dirs` flags on the command +// line. // // When generating for a package, individual types or fields of structs may opt // out of Conversion generation by specifying a comment on the of the form: diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go index 62ae109a4a..6e5793109b 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -152,7 +152,7 @@ func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync // as specified here. 
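The rewritten conversion-gen documentation above describes how a destination package opts into conversion generation through comment tags in its doc.go. A hypothetical sketch of such a doc.go (the package paths are illustrative, not taken from this patch):

```go
// Package v1alpha2 is the destination package: the generated Convert_* and
// autoConvert_* functions are written here, alongside any hand-written
// conversion overrides.
//
// The first tag names a package containing internal types; the second is only
// needed when the external types do not live in this package.
// +k8s:conversion-gen=sigs.k8s.io/example/pkg/apis/widgets
// +k8s:conversion-gen-external-types=sigs.k8s.io/example/pkg/apis/widgets/v1alpha2
package v1alpha2
```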
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead func NewFilteredSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, namespace string, tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) } // NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. @@ -165,7 +165,7 @@ func NewSharedInformerFactoryWithOptions(client {{.clientSetInterface|raw}}, def startedInformers: make(map[{{.reflectType|raw}}]bool), customResync: make(map[{{.reflectType|raw}}]{{.timeDuration|raw}}), } - + // Apply all options for _, opt := range options { factory = opt(factory) diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go index 54632de053..cad907990f 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -22,6 +22,7 @@ import ( "strings" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + codegennamer "k8s.io/code-generator/pkg/namer" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" @@ -56,6 +57,7 @@ func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems { "raw": namer.NewRawNamer(g.outputPackage, g.imports), "allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions), "publicPlural": namer.NewPublicPluralNamer(pluralExceptions), + "resource": codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)), } } @@ -111,7 +113,9 @@ func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w i GoName: namer.IC(v.Version.NonEmpty()), Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]), } - schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) + func() { + schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"}) + }() group.Versions = append(group.Versions, version) } sort.Sort(versionSort(group.Versions)) @@ -168,7 +172,7 @@ func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResour {{range $version := .Versions -}} // Group={{$group.Name}}, Version={{.Name}} {{range .Resources -}} - case {{index $.schemeGVs $version|raw}}.WithResource("{{.|allLowercasePlural}}"): + case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"): return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil {{end}} {{end}} diff --git a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go new file mode 100644 index 0000000000..fd8c3a8553 --- /dev/null +++ b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +import ( + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +// TagOverrideNamer is a namer which pulls names from a given tag, if specified, +// and otherwise falls back to a different namer. +type TagOverrideNamer struct { + tagName string + fallback namer.Namer +} + +// Name returns the tag value if it exists. It no tag was found the fallback namer will be used +func (n *TagOverrideNamer) Name(t *types.Type) string { + if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" { + return nameOverride + } + + return n.fallback.Name(t) +} + +// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as +// the name, or falls back to another Namer if the tag is not present. +func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { + return &TagOverrideNamer{ + tagName: tagName, + fallback: fallback, + } +} + +// extractTag gets the comment-tags for the key. If the tag did not exist, it +// returns the empty string. +func extractTag(key string, lines []string) string { + val, present := types.ExtractCommentTags("+", lines)[key] + if !present || len(val) < 1 { + return "" + } + + return val[0] +} diff --git a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go deleted file mode 100644 index 4fecd17329..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
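The new pkg/namer/tag-override.go above backs the codegennamer.NewTagOverrideNamer("resourceName", ...) calls that client-gen and informer-gen now share: the override is read from a "+resourceName=" comment tag on the type. A hypothetical type illustrating the effect (type name and tag value are made up):

```go
package v1alpha2

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// The resourceName tag below is what the TagOverrideNamer reads; without it,
// client-gen and informer-gen would fall back to the default lowercase plural
// of the type name when keying the generated clients and informers.
// +resourceName=widgets
type WidgetMetrics struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}
```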
-*/ - -package flag - -import ( - "crypto/tls" - "fmt" - - "k8s.io/apimachinery/pkg/util/sets" -) - -// ciphers maps strings into tls package cipher constants in -// https://golang.org/pkg/crypto/tls/#pkg-constants -var ciphers = map[string]uint16{ - "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, -} - -func TLSCipherPossibleValues() []string { - cipherKeys := sets.NewString() - for key := range ciphers { - cipherKeys.Insert(key) - } - return cipherKeys.List() -} - -func TLSCipherSuites(cipherNames []string) ([]uint16, error) { - if len(cipherNames) == 0 { - return nil, nil - } - ciphersIntSlice := make([]uint16, 0) - for _, cipher := range cipherNames { - intValue, ok := ciphers[cipher] - if !ok { - return nil, fmt.Errorf("Cipher suite %s not supported or doesn't exist", cipher) - } - ciphersIntSlice = append(ciphersIntSlice, intValue) - } - return ciphersIntSlice, nil -} - -var versions = map[string]uint16{ - "VersionTLS10": tls.VersionTLS10, - "VersionTLS11": tls.VersionTLS11, - "VersionTLS12": tls.VersionTLS12, - "VersionTLS13": tls.VersionTLS13, -} - -func TLSPossibleVersions() []string { - versionsKeys := sets.NewString() - for key := range versions { - versionsKeys.Insert(key) - } - return versionsKeys.List() -} - -func TLSVersion(versionName string) (uint16, error) { - if len(versionName) == 0 { - return DefaultTLSVersion(), nil - } - if version, ok := versions[versionName]; ok { - return version, nil - } - return 0, fmt.Errorf("unknown tls version %q", versionName) -} - -func DefaultTLSVersion() uint16 { - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - return tls.VersionTLS12 -} diff --git a/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go 
b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go deleted file mode 100644 index bd2cf5f875..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - "fmt" - "sort" - "strings" -) - -// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding -// that separates keys from values with ':' and separates key-value pairs with ','. -// A key can be repeated multiple times, in which case the values are appended to a -// slice of strings associated with that key. Items in the list associated with a given -// key will appear in the order provided. -// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` -// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map. -// This makes it possible to override default values with a command-line option rather than appending to defaults, -// while still allowing the distribution of key-value pairs across multiple flag invocations. -// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` -type ColonSeparatedMultimapStringString struct { - Multimap *map[string][]string - initialized bool // set to true after the first Set call -} - -// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the -// ColonSeparatedMultimapStringString flag parsing shim for that map. 
-func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString { - return &ColonSeparatedMultimapStringString{Multimap: m} -} - -// Set implements github.com/spf13/pflag.Value -func (m *ColonSeparatedMultimapStringString) Set(value string) error { - if m.Multimap == nil { - return fmt.Errorf("no target (nil pointer to map[string][]string)") - } - if !m.initialized || *m.Multimap == nil { - // clear default values, or allocate if no existing map - *m.Multimap = make(map[string][]string) - m.initialized = true - } - for _, pair := range strings.Split(value, ",") { - if len(pair) == 0 { - continue - } - kv := strings.SplitN(pair, ":", 2) - if len(kv) != 2 { - return fmt.Errorf("malformed pair, expect string:string") - } - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - (*m.Multimap)[k] = append((*m.Multimap)[k], v) - } - return nil -} - -// String implements github.com/spf13/pflag.Value -func (m *ColonSeparatedMultimapStringString) String() string { - type kv struct { - k string - v string - } - kvs := make([]kv, 0, len(*m.Multimap)) - for k, vs := range *m.Multimap { - for i := range vs { - kvs = append(kvs, kv{k: k, v: vs[i]}) - } - } - // stable sort by keys, order of values should be preserved - sort.SliceStable(kvs, func(i, j int) bool { - return kvs[i].k < kvs[j].k - }) - pairs := make([]string, 0, len(kvs)) - for i := range kvs { - pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v)) - } - return strings.Join(pairs, ",") -} - -// Type implements github.com/spf13/pflag.Value -func (m *ColonSeparatedMultimapStringString) Type() string { - return "colonSeparatedMultimapStringString" -} - -// Empty implements OmitEmpty -func (m *ColonSeparatedMultimapStringString) Empty() bool { - return len(*m.Multimap) == 0 -} diff --git a/vendor/k8s.io/component-base/cli/flag/configuration_map.go b/vendor/k8s.io/component-base/cli/flag/configuration_map.go deleted file mode 100644 index 911b05ec6c..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/configuration_map.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flag - -import ( - "fmt" - "sort" - "strings" -) - -type ConfigurationMap map[string]string - -func (m *ConfigurationMap) String() string { - pairs := []string{} - for k, v := range *m { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -func (m *ConfigurationMap) Set(value string) error { - for _, s := range strings.Split(value, ",") { - if len(s) == 0 { - continue - } - arr := strings.SplitN(s, "=", 2) - if len(arr) == 2 { - (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) - } else { - (*m)[strings.TrimSpace(arr[0])] = "" - } - } - return nil -} - -func (*ConfigurationMap) Type() string { - return "mapStringString" -} diff --git a/vendor/k8s.io/component-base/cli/flag/flags.go b/vendor/k8s.io/component-base/cli/flag/flags.go deleted file mode 100644 index d0fff8db2e..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/flags.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - goflag "flag" - "strings" - - "github.com/spf13/pflag" - "k8s.io/klog" -) - -// WordSepNormalizeFunc changes all flags that contain "_" separators -func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - if strings.Contains(name, "_") { - return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) - } - return pflag.NormalizedName(name) -} - -// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators -func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - if strings.Contains(name, "_") { - nname := strings.Replace(name, "_", "-", -1) - klog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) - - return pflag.NormalizedName(nname) - } - return pflag.NormalizedName(name) -} - -// InitFlags normalizes, parses, then logs the command line flags -func InitFlags() { - pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - pflag.Parse() - pflag.VisitAll(func(flag *pflag.Flag) { - klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) - }) -} diff --git a/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go b/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go deleted file mode 100644 index bf8dbfb9bf..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - "fmt" - "sort" - "strings" -) - -// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string 0 { - s = s + ":" + strings.Join(nkc.Names, ",") - } - return s -} - -func (nkc *NamedCertKey) Set(value string) error { - cs := strings.SplitN(value, ":", 2) - var keycert string - if len(cs) == 2 { - var names string - keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1]) - if names == "" { - return errors.New("empty names list is not allowed") - } - nkc.Names = nil - for _, name := range strings.Split(names, ",") { - nkc.Names = append(nkc.Names, strings.TrimSpace(name)) - } - } else { - nkc.Names = nil - keycert = strings.TrimSpace(cs[0]) - } - cs = strings.Split(keycert, ",") - if len(cs) != 2 { - return errors.New("expected comma separated certificate and key file paths") - } - nkc.CertFile = strings.TrimSpace(cs[0]) - nkc.KeyFile = strings.TrimSpace(cs[1]) - return nil -} - -func (*NamedCertKey) Type() string { - return "namedCertKey" -} - -// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own -// flag instance (in contrast to comma separated slices). -type NamedCertKeyArray struct { - value *[]NamedCertKey - changed bool -} - -var _ flag.Value = &NamedCertKey{} - -// NewNamedKeyCertArray creates a new NamedCertKeyArray with the internal value -// pointing to p. -func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray { - return &NamedCertKeyArray{ - value: p, - } -} - -func (a *NamedCertKeyArray) Set(val string) error { - nkc := NamedCertKey{} - err := nkc.Set(val) - if err != nil { - return err - } - if !a.changed { - *a.value = []NamedCertKey{nkc} - a.changed = true - } else { - *a.value = append(*a.value, nkc) - } - return nil -} - -func (a *NamedCertKeyArray) Type() string { - return "namedCertKey" -} - -func (a *NamedCertKeyArray) String() string { - nkcs := make([]string, 0, len(*a.value)) - for i := range *a.value { - nkcs = append(nkcs, (*a.value)[i].String()) - } - return "[" + strings.Join(nkcs, ";") + "]" -} diff --git a/vendor/k8s.io/component-base/cli/flag/omitempty.go b/vendor/k8s.io/component-base/cli/flag/omitempty.go deleted file mode 100644 index c354754ea7..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/omitempty.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -// OmitEmpty is an interface for flags to report whether their underlying value -// is "empty." If a flag implements OmitEmpty and returns true for a call to Empty(), -// it is assumed that flag may be omitted from the command line. 
-type OmitEmpty interface { - Empty() bool -} diff --git a/vendor/k8s.io/component-base/cli/flag/sectioned.go b/vendor/k8s.io/component-base/cli/flag/sectioned.go deleted file mode 100644 index 493a6c0f0f..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/sectioned.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - "bytes" - "fmt" - "io" - "strings" - - "github.com/spf13/pflag" -) - -// NamedFlagSets stores named flag sets in the order of calling FlagSet. -type NamedFlagSets struct { - // Order is an ordered list of flag set names. - Order []string - // FlagSets stores the flag sets by name. - FlagSets map[string]*pflag.FlagSet -} - -// FlagSet returns the flag set with the given name and adds it to the -// ordered name list if it is not in there yet. -func (nfs *NamedFlagSets) FlagSet(name string) *pflag.FlagSet { - if nfs.FlagSets == nil { - nfs.FlagSets = map[string]*pflag.FlagSet{} - } - if _, ok := nfs.FlagSets[name]; !ok { - nfs.FlagSets[name] = pflag.NewFlagSet(name, pflag.ExitOnError) - nfs.Order = append(nfs.Order, name) - } - return nfs.FlagSets[name] -} - -// PrintSections prints the given names flag sets in sections, with the maximal given column number. -// If cols is zero, lines are not wrapped. -func PrintSections(w io.Writer, fss NamedFlagSets, cols int) { - for _, name := range fss.Order { - fs := fss.FlagSets[name] - if !fs.HasFlags() { - continue - } - - wideFS := pflag.NewFlagSet("", pflag.ExitOnError) - wideFS.AddFlagSet(fs) - - var zzz string - if cols > 24 { - zzz = strings.Repeat("z", cols-24) - wideFS.Int(zzz, 0, strings.Repeat("z", cols-24)) - } - - var buf bytes.Buffer - fmt.Fprintf(&buf, "\n%s flags:\n\n%s", strings.ToUpper(name[:1])+name[1:], wideFS.FlagUsagesWrapped(cols)) - - if cols > 24 { - i := strings.Index(buf.String(), zzz) - lines := strings.Split(buf.String()[:i], "\n") - fmt.Fprint(w, strings.Join(lines[:len(lines)-1], "\n")) - fmt.Fprintln(w) - } else { - fmt.Fprint(w, buf.String()) - } - } -} diff --git a/vendor/k8s.io/component-base/cli/flag/string_flag.go b/vendor/k8s.io/component-base/cli/flag/string_flag.go deleted file mode 100644 index 331bdb66e2..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/string_flag.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flag - -// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not. -type StringFlag struct { - // If Set has been invoked this value is true - provided bool - // The exact value provided on the flag - value string -} - -func NewStringFlag(defaultVal string) StringFlag { - return StringFlag{value: defaultVal} -} - -func (f *StringFlag) Default(value string) { - f.value = value -} - -func (f StringFlag) String() string { - return f.value -} - -func (f StringFlag) Value() string { - return f.value -} - -func (f *StringFlag) Set(value string) error { - f.value = value - f.provided = true - - return nil -} - -func (f StringFlag) Provided() bool { - return f.provided -} - -func (f *StringFlag) Type() string { - return "string" -} diff --git a/vendor/k8s.io/component-base/cli/flag/tristate.go b/vendor/k8s.io/component-base/cli/flag/tristate.go deleted file mode 100644 index cf16376bf9..0000000000 --- a/vendor/k8s.io/component-base/cli/flag/tristate.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - "fmt" - "strconv" -) - -// Tristate is a flag compatible with flags and pflags that -// keeps track of whether it had a value supplied or not. 
-type Tristate int - -const ( - Unset Tristate = iota // 0 - True - False -) - -func (f *Tristate) Default(value bool) { - *f = triFromBool(value) -} - -func (f Tristate) String() string { - b := boolFromTri(f) - return fmt.Sprintf("%t", b) -} - -func (f Tristate) Value() bool { - b := boolFromTri(f) - return b -} - -func (f *Tristate) Set(value string) error { - boolVal, err := strconv.ParseBool(value) - if err != nil { - return err - } - - *f = triFromBool(boolVal) - return nil -} - -func (f Tristate) Provided() bool { - if f != Unset { - return true - } - return false -} - -func (f *Tristate) Type() string { - return "tristate" -} - -func boolFromTri(t Tristate) bool { - if t == True { - return true - } else { - return false - } -} - -func triFromBool(b bool) Tristate { - if b { - return True - } else { - return False - } -} diff --git a/vendor/k8s.io/component-base/config/OWNERS b/vendor/k8s.io/component-base/config/OWNERS new file mode 100644 index 0000000000..11d499d75d --- /dev/null +++ b/vendor/k8s.io/component-base/config/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: +- api-approvers +reviewers: +- api-reviewers +- luxas +- mtaufen +- sttts +labels: +- kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/doc.go b/vendor/k8s.io/component-base/config/doc.go similarity index 90% rename from vendor/k8s.io/apimachinery/pkg/apis/config/doc.go rename to vendor/k8s.io/component-base/config/doc.go index d849c7aa3b..dd0a5a53a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/config/doc.go +++ b/vendor/k8s.io/component-base/config/doc.go @@ -16,4 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package -package config // import "k8s.io/apimachinery/pkg/apis/config" +package config // import "k8s.io/component-base/config" diff --git a/vendor/k8s.io/component-base/config/types.go b/vendor/k8s.io/component-base/config/types.go new file mode 100644 index 0000000000..f0a20915cc --- /dev/null +++ b/vendor/k8s.io/component-base/config/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClientConnectionConfiguration contains details for constructing a client. +type ClientConnectionConfiguration struct { + // kubeconfig is the path to a KubeConfig file. + Kubeconfig string + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string + // contentType is the content type used when sending data to the server from this client. + ContentType string + // qps controls the number of queries per second allowed for this connection. 
+ QPS float32 + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 +} + +// LeaderElectionConfiguration defines the configuration of leader election +// clients for components that can run with leader election enabled. +type LeaderElectionConfiguration struct { + // leaderElect enables a leader election client to gain leadership + // before executing the main loop. Enable this when running replicated + // components for high availability. + LeaderElect bool + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. + LeaseDuration metav1.Duration + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + RenewDeadline metav1.Duration + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + RetryPeriod metav1.Duration + // resourceLock indicates the resource object type that will be used to lock + // during leader election cycles. + ResourceLock string +} + +// DebuggingConfiguration holds configuration for Debugging related features. +type DebuggingConfiguration struct { + // enableProfiling enables profiling via web interface host:port/debug/pprof/ + EnableProfiling bool + // enableContentionProfiling enables lock contention profiling, if + // enableProfiling is true. + EnableContentionProfiling bool +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go similarity index 51% rename from vendor/k8s.io/apimachinery/pkg/apis/config/zz_generated.deepcopy.go rename to vendor/k8s.io/component-base/config/zz_generated.deepcopy.go index f09beb0e38..9812234f13 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/config/zz_generated.deepcopy.go +++ b/vendor/k8s.io/component-base/config/zz_generated.deepcopy.go @@ -35,3 +35,38 @@ func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfigurati in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DebuggingConfiguration) DeepCopyInto(out *DebuggingConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebuggingConfiguration. +func (in *DebuggingConfiguration) DeepCopy() *DebuggingConfiguration { + if in == nil { + return nil + } + out := new(DebuggingConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElectionConfiguration) DeepCopyInto(out *LeaderElectionConfiguration) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElectionConfiguration. 
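The component-base config types added above (ClientConnectionConfiguration, LeaderElectionConfiguration, DebuggingConfiguration) are plain structs consumed by component configuration. A minimal sketch of populating the leader-election settings; the values are illustrative defaults, not mandated by this patch:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	componentbaseconfig "k8s.io/component-base/config"
)

func main() {
	// Illustrative values only; the struct fields come from the types above.
	lec := componentbaseconfig.LeaderElectionConfiguration{
		LeaderElect:   true,
		LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
		RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
		RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
		ResourceLock:  "leases",
	}
	fmt.Printf("%+v\n", lec)
}
```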
+func (in *LeaderElectionConfiguration) DeepCopy() *LeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(LeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go index 4548108e8b..40f1306d5d 100644 --- a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go +++ b/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go @@ -718,7 +718,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { } if !ut.Key.IsAssignable() { - klog.Fatalf("Hit an unsupported type %v.", uet) + klog.Fatalf("Hit an unsupported type %v for: %v", uet, t) } sw.Do("*out = make($.|raw$, len(*in))\n", t) @@ -745,6 +745,10 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { case uet.IsAssignable(): sw.Do("(*out)[key] = val\n", nil) case uet.Kind == types.Interface: + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uet.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) + } sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil) // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang @@ -761,7 +765,7 @@ func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) { case uet.Kind == types.Struct: sw.Do("(*out)[key] = *val.DeepCopy()\n", uet) default: - klog.Fatalf("Hit an unsupported type %v.", uet) + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) } sw.Do("}\n", nil) } @@ -793,6 +797,10 @@ func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) { g.generateFor(ut.Elem, sw) sw.Do("}\n", nil) } else if uet.Kind == types.Interface { + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uet.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uet.Name.Name) + } sw.Do("if (*in)[i] != nil {\n", nil) // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang @@ -802,7 +810,7 @@ func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) { } else if uet.Kind == types.Struct { sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil) } else { - klog.Fatalf("Hit an unsupported type %v.", uet) + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) } sw.Do("}\n", nil) } @@ -863,6 +871,10 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args) } case uft.Kind == types.Interface: + // Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function + if uft.Name.Name == "interface{}" { + klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy as one of the methods.", uft.Name.Name) + } sw.Do("if in.$.name$ != nil {\n", args) // Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it // as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). 
The golang @@ -870,7 +882,7 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args) sw.Do("}\n", nil) default: - klog.Fatalf("Hit an unsupported type %v.", uft) + klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t) } } } @@ -907,6 +919,6 @@ func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) { sw.Do("*out = new($.Elem|raw$)\n", ut) sw.Do("(*in).DeepCopyInto(*out)\n", nil) default: - klog.Fatalf("Hit an unsupported type %v.", uet) + klog.Fatalf("Hit an unsupported type %v for %v", uet, t) } } diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go index d0698d33cd..8ddce7e3aa 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go +++ b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go @@ -205,17 +205,19 @@ func $.type|public$KeySet(theMap interface{}) $.type|public$ { } // Insert adds items to the set. -func (s $.type|public$) Insert(items ...$.type|raw$) { +func (s $.type|public$) Insert(items ...$.type|raw$) $.type|public$ { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s $.type|public$) Delete(items ...$.type|raw$) { +func (s $.type|public$) Delete(items ...$.type|raw$) $.type|public$ { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go index 766f4501e0..9bfa85d43d 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go @@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. -func (s Byte) Insert(items ...byte) { +func (s Byte) Insert(items ...byte) Byte { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) { +func (s Byte) Delete(items ...byte) Byte { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go index a0a513cd9b..88bd709679 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/int.go +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go @@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) { +func (s Int) Insert(items ...int) Int { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) { +func (s Int) Delete(items ...int) Int { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go index 9ca9af0c59..b375a1b065 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go @@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. 
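The set-gen change running through these hunks makes Insert and Delete return the receiver for every generated set type (Byte, Int, Int64, String), so construction can be chained. A minimal sketch against the gengo sets package touched here:

```go
package main

import (
	"fmt"

	"k8s.io/gengo/examples/set-gen/sets"
)

func main() {
	// Insert and Delete now return the set itself, so calls can be chained.
	s := sets.String{}.Insert("a", "b", "c").Delete("a")
	fmt.Println(s.List()) // [b c]
}
```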
-func (s Int64) Insert(items ...int64) { +func (s Int64) Insert(items ...int64) Int64 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int64) Delete(items ...int64) { +func (s Int64) Delete(items ...int64) Int64 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/string.go b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go index ba00ad7df4..e6f37db887 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/string.go +++ b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go @@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) { +func (s String) Insert(items ...string) String { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) { +func (s String) Delete(items ...string) String { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/klog/klogr/README.md b/vendor/k8s.io/klog/klogr/README.md new file mode 100644 index 0000000000..cfa5de617d --- /dev/null +++ b/vendor/k8s.io/klog/klogr/README.md @@ -0,0 +1,8 @@ +# Minimal Go logging using klog + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Kubernetes' [klog](https://github.com/kubernetes/klog). This +provides a relatively minimalist API to logging in Go, backed by a well-proven +implementation. + +This is a BETA grade implementation. diff --git a/vendor/k8s.io/klog/klogr/klogr.go b/vendor/k8s.io/klog/klogr/klogr.go new file mode 100644 index 0000000000..f63557daac --- /dev/null +++ b/vendor/k8s.io/klog/klogr/klogr.go @@ -0,0 +1,194 @@ +// Package klogr implements github.com/go-logr/logr.Logger in terms of +// k8s.io/klog. +package klogr + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" + "sort" + + "github.com/go-logr/logr" + "k8s.io/klog" +) + +// New returns a logr.Logger which is implemented by klog. +func New() logr.Logger { + return klogger{ + level: 0, + prefix: "", + values: nil, + } +} + +type klogger struct { + level int + prefix string + values []interface{} +} + +func (l klogger) clone() klogger { + return klogger{ + level: l.level, + prefix: l.prefix, + values: copySlice(l.values), + } +} + +func copySlice(in []interface{}) []interface{} { + out := make([]interface{}, len(in)) + copy(out, in) + return out +} + +// Magic string for intermediate frames that we should ignore. +const autogeneratedFrameName = "" + +// Discover how many frames we need to climb to find the caller. This approach +// was suggested by Ian Lance Taylor of the Go team, so it *should* be safe +// enough (famous last words). +func framesToCaller() int { + // 1 is the immediate caller. 3 should be too many. + for i := 1; i < 3; i++ { + _, file, _, _ := runtime.Caller(i + 1) // +1 for this function's frame + if file != autogeneratedFrameName { + return i + } + } + return 1 // something went wrong, this is safe +} + +// trimDuplicates will deduplicates elements provided in multiple KV tuple +// slices, whilst maintaining the distinction between where the items are +// contained. 
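// A short usage sketch for the klogr package vendored here; the flag setup and
// the logger names are illustrative only, not part of klogr itself.
package main

import (
	"flag"

	"k8s.io/klog"
	"k8s.io/klog/klogr"
)

func main() {
	// Register klog's flags (e.g. -v) so Enabled()/V() honour the verbosity level.
	klog.InitFlags(nil)
	flag.Parse()

	log := klogr.New().WithName("openstack-cluster").WithValues("cluster", "demo")
	log.Info("reconciling", "namespace", "default")
	log.V(2).Info("verbose detail, emitted only at -v=2 or higher")
	log.Error(nil, "reported regardless of the verbosity level")
}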
+func trimDuplicates(kvLists ...[]interface{}) [][]interface{} { + // maintain a map of all seen keys + seenKeys := map[interface{}]struct{}{} + // build the same number of output slices as inputs + outs := make([][]interface{}, len(kvLists)) + // iterate over the input slices backwards, as 'later' kv specifications + // of the same key will take precedence over earlier ones + for i := len(kvLists) - 1; i >= 0; i-- { + // initialise this output slice + outs[i] = []interface{}{} + // obtain a reference to the kvList we are processing + kvList := kvLists[i] + + // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for + // slices that have an even number of elements. + // We add (len(kvList) % 2) here to handle the case where there is an + // odd number of elements in a kvList. + // If there is an odd number, then the last element in the slice will + // have the value 'null'. + for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { + k := kvList[i2] + // if we have already seen this key, do not include it again + if _, ok := seenKeys[k]; ok { + continue + } + // make a note that we've observed a new key + seenKeys[k] = struct{}{} + // attempt to obtain the value of the key + var v interface{} + // i2+1 should only ever be out of bounds if we handling the first + // iteration over a slice with an odd number of elements + if i2+1 < len(kvList) { + v = kvList[i2+1] + } + // add this KV tuple to the *start* of the output list to maintain + // the original order as we are iterating over the slice backwards + outs[i] = append([]interface{}{k, v}, outs[i]...) + } + } + return outs +} + +func flatten(kvList ...interface{}) string { + keys := make([]string, 0, len(kvList)) + vals := make(map[string]interface{}, len(kvList)) + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + panic(fmt.Sprintf("key is not a string: %s", pretty(kvList[i]))) + } + var v interface{} + if i+1 < len(kvList) { + v = kvList[i+1] + } + keys = append(keys, k) + vals[k] = v + } + sort.Strings(keys) + buf := bytes.Buffer{} + for i, k := range keys { + v := vals[k] + if i > 0 { + buf.WriteRune(' ') + } + buf.WriteString(pretty(k)) + buf.WriteString("=") + buf.WriteString(pretty(v)) + } + return buf.String() +} + +func pretty(value interface{}) string { + jb, _ := json.Marshal(value) + return string(jb) +} + +func (l klogger) Info(msg string, kvList ...interface{}) { + if l.Enabled() { + lvlStr := flatten("level", l.level) + msgStr := flatten("msg", msg) + trimmed := trimDuplicates(l.values, kvList) + fixedStr := flatten(trimmed[0]...) + userStr := flatten(trimmed[1]...) + klog.InfoDepth(framesToCaller(), l.prefix, " ", lvlStr, " ", msgStr, " ", fixedStr, " ", userStr) + } +} + +func (l klogger) Enabled() bool { + return bool(klog.V(klog.Level(l.level))) +} + +func (l klogger) Error(err error, msg string, kvList ...interface{}) { + msgStr := flatten("msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + errStr := flatten("error", loggableErr) + trimmed := trimDuplicates(l.values, kvList) + fixedStr := flatten(trimmed[0]...) + userStr := flatten(trimmed[1]...) + klog.ErrorDepth(framesToCaller(), l.prefix, " ", msgStr, " ", errStr, " ", fixedStr, " ", userStr) +} + +func (l klogger) V(level int) logr.InfoLogger { + new := l.clone() + new.level = level + return new +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '/' characters to separate name elements. 
Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.Logger { + new := l.clone() + if len(l.prefix) > 0 { + new.prefix = l.prefix + "/" + } + new.prefix += name + return new +} + +func (l klogger) WithValues(kvList ...interface{}) logr.Logger { + new := l.clone() + new.values = append(new.values, kvList...) + return new +} + +var _ logr.Logger = klogger{} +var _ logr.InfoLogger = klogger{} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 890a39399f..a57dcd363f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -196,24 +196,20 @@ func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error } fields := map[string]Schema{} - fieldOrder := []string{} for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { var err error - name := namedSchema.GetName() - path := path.FieldPath(name) - fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path) + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) if err != nil { return nil, err } - fieldOrder = append(fieldOrder, name) } return &Kind{ BaseSchema: d.parseBaseSchema(s, path), RequiredFields: s.GetRequired(), Fields: fields, - FieldOrder: fieldOrder, }, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go index 46643aa508..f26b5ef881 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -173,8 +173,6 @@ type Kind struct { RequiredFields []string // Maps field names to types. Fields map[string]Schema - // FieldOrder reports the canonical order for the fields. - FieldOrder []string } var _ Schema = &Kind{} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go index ebbd20d025..d641ecc6e6 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go @@ -17,7 +17,7 @@ limitations under the License. package kubeadm import ( - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" @@ -34,16 +34,13 @@ type InitConfiguration struct { // ClusterConfiguration holds the cluster-wide information, and embeds that struct (which can be (un)marshalled separately as well) // When InitConfiguration is marshalled to bytes in the external version, this information IS NOT preserved (which can be seen from - // the `json:"-"` tag in the external variant of these API types. Here, in the internal version `json:",inline"` is used, which means - // that all of ClusterConfiguration's fields will appear as they would be InitConfiguration's fields. This is used in practice solely - // in kubeadm API roundtrip unit testing. Check out `cmd/kubeadm/app/util/config/*_test.go` for more information. Normally, the internal - // type is NEVER marshalled, but always converted to some external version first. - ClusterConfiguration `json:",inline"` + // the `json:"-"` tag in the external variant of these API types. 
+ ClusterConfiguration `json:"-"` // BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. BootstrapTokens []BootstrapToken - // NodeRegistration holds fields that relate to registering the new master node to the cluster + // NodeRegistration holds fields that relate to registering the new control-plane node to the cluster NodeRegistration NodeRegistrationOptions // LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node @@ -208,10 +205,10 @@ type APIEndpoint struct { BindPort int32 } -// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join" +// NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join" type NodeRegistrationOptions struct { - // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm joiń` operation. + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. // This field is also used in the CommonName field of the kubelet's client certificate to the API server. // Defaults to the hostname of the node if not provided. Name string @@ -220,7 +217,7 @@ type NodeRegistrationOptions struct { CRISocket string // Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process - // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an + // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an // empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. Taints []v1.Taint @@ -244,7 +241,7 @@ type Networking struct { // TODO: The BootstrapToken object should move out to either k8s.io/client-go or k8s.io/api in the future // (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. type BootstrapToken struct { - // Token is used for establishing bidirectional trust between nodes and masters. + // Token is used for establishing bidirectional trust between nodes and control-planes. // Used for joining nodes in the cluster. Token *BootstrapTokenString // Description sets a human-friendly message why this token exists and what it's used @@ -315,11 +312,11 @@ type ExternalEtcd struct { type JoinConfiguration struct { metav1.TypeMeta - // NodeRegistration holds fields that relate to registering the new master node to the cluster + // NodeRegistration holds fields that relate to registering the new control-plane node to the cluster NodeRegistration NodeRegistrationOptions // CACertPath is the path to the SSL certificate authority used to - // secure comunications between node and master. + // secure comunications between node and control-plane. // Defaults to "/etc/kubernetes/pki/ca.crt". CACertPath string @@ -359,7 +356,7 @@ type Discovery struct { // BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery type BootstrapTokenDiscovery struct { // Token is a token used to validate cluster information - // fetched from the master. + // fetched from the control-plane. 
Token string // APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. @@ -377,7 +374,7 @@ type BootstrapTokenDiscovery struct { // UnsafeSkipCAVerification allows token-based discovery // without CA verification via CACertHashes. This can weaken - // the security of kubeadm since other nodes can impersonate the master. + // the security of kubeadm since other nodes can impersonate the control-plane. UnsafeSkipCAVerification bool } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go index 60c72a73d3..a241f07cc5 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults.go @@ -68,7 +68,6 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { // SetDefaults_InitConfiguration assigns default values for the InitConfiguration func SetDefaults_InitConfiguration(obj *InitConfiguration) { SetDefaults_ClusterConfiguration(&obj.ClusterConfiguration) - SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) SetDefaults_BootstrapTokens(obj) SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint) } @@ -138,17 +137,10 @@ func SetDefaults_JoinConfiguration(obj *JoinConfiguration) { obj.CACertPath = DefaultCACertPath } - SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) SetDefaults_JoinControlPlane(obj.ControlPlane) SetDefaults_Discovery(&obj.Discovery) } -func SetDefaults_NodeRegistrationOptions(obj *NodeRegistrationOptions) { - if obj.CRISocket == "" { - obj.CRISocket = DefaultCRISocket - } -} - func SetDefaults_JoinControlPlane(obj *JoinControlPlane) { if obj != nil { SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go index f4a50f9972..18510d4c86 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_unix.go @@ -23,6 +23,4 @@ const ( DefaultCACertPath = "/etc/kubernetes/pki/ca.crt" // DefaultUrlScheme defines default socket url prefix DefaultUrlScheme = "unix" - // DefaultCRISocket defines the default cri socket - DefaultCRISocket = "/var/run/dockershim.sock" ) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go index 13b1d88ffc..8523188019 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/defaults_windows.go @@ -23,6 +23,4 @@ const ( DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt" // DefaultUrlScheme defines default socket url prefix DefaultUrlScheme = "tcp" - // DefaultCRISocket defines the default cri socket - DefaultCRISocket = "tcp://localhost:2375" ) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go index 8b45d89be6..e88276027b 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/doc.go @@ -154,7 +154,7 @@ limitations under the License. // deployed in the cluster. 
If this object is not provided or provided only partially, kubeadm applies defaults. // // See https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ or https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration -// for kube proxy official documentation. +// for kubelet official documentation. // // Here is a fully populated example of a single YAML file containing multiple // configuration types to be used during a `kubeadm init` run. @@ -181,7 +181,7 @@ limitations under the License. // effect: "NoSchedule" // kubeletExtraArgs: // cgroupDriver: "cgroupfs" -// localApiEndpoint: +// localAPIEndpoint: // advertiseAddress: "10.100.0.1" // bindPort: 6443 // --- diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go index 9daf711450..596aedb9b9 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/types.go @@ -42,7 +42,7 @@ type InitConfiguration struct { // This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature BootstrapTokens []BootstrapToken `json:"bootstrapTokens,omitempty"` - // NodeRegistration holds fields that relate to registering the new master node to the cluster + // NodeRegistration holds fields that relate to registering the new control-plane node to the cluster NodeRegistration NodeRegistrationOptions `json:"nodeRegistration,omitempty"` // LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node @@ -191,10 +191,10 @@ type APIEndpoint struct { BindPort int32 `json:"bindPort"` } -// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join" +// NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join" type NodeRegistrationOptions struct { - // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm joiń` operation. + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. // This field is also used in the CommonName field of the kubelet's client certificate to the API server. // Defaults to the hostname of the node if not provided. Name string `json:"name,omitempty"` @@ -203,7 +203,7 @@ type NodeRegistrationOptions struct { CRISocket string `json:"criSocket,omitempty"` // Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process - // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an + // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an // empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. Taints []v1.Taint `json:"taints,omitempty"` @@ -225,7 +225,7 @@ type Networking struct { // BootstrapToken describes one bootstrap token, stored as a Secret in the cluster type BootstrapToken struct { - // Token is used for establishing bidirectional trust between nodes and masters. 
+ // Token is used for establishing bidirectional trust between nodes and control-planes. // Used for joining nodes in the cluster. Token *BootstrapTokenString `json:"token"` // Description sets a human-friendly message why this token exists and what it's used @@ -301,11 +301,11 @@ type ExternalEtcd struct { type JoinConfiguration struct { metav1.TypeMeta `json:",inline"` - // NodeRegistration holds fields that relate to registering the new master node to the cluster + // NodeRegistration holds fields that relate to registering the new control-plane node to the cluster NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"` // CACertPath is the path to the SSL certificate authority used to - // secure comunications between node and master. + // secure comunications between node and control-plane. // Defaults to "/etc/kubernetes/pki/ca.crt". CACertPath string `json:"caCertPath"` @@ -345,7 +345,7 @@ type Discovery struct { // BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery type BootstrapTokenDiscovery struct { // Token is a token used to validate cluster information - // fetched from the master. + // fetched from the control-plane. Token string `json:"token"` // APIServerEndpoint is an IP or domain name to the API server from which info will be fetched. @@ -363,7 +363,7 @@ type BootstrapTokenDiscovery struct { // UnsafeSkipCAVerification allows token-based discovery // without CA verification via CACertHashes. This can weaken - // the security of kubeadm since other nodes can impersonate the master. + // the security of kubeadm since other nodes can impersonate the control-plane. UnsafeSkipCAVerification bool `json:"unsafeSkipCAVerification"` } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go index 4860aec6a2..95a8e67963 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1/zz_generated.defaults.go @@ -50,13 +50,11 @@ func SetObjectDefaults_InitConfiguration(in *InitConfiguration) { a := &in.BootstrapTokens[i] SetDefaults_BootstrapToken(a) } - SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) SetDefaults_APIEndpoint(&in.LocalAPIEndpoint) } func SetObjectDefaults_JoinConfiguration(in *JoinConfiguration) { SetDefaults_JoinConfiguration(in) - SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) SetDefaults_Discovery(&in.Discovery) if in.Discovery.File != nil { SetDefaults_FileDiscovery(in.Discovery.File) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD index 7c1d85a611..acd3daff0d 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD @@ -8,7 +8,11 @@ load( go_library( name = "go_default_library", - srcs = ["constants.go"], + srcs = [ + "constants.go", + "constants_unix.go", + "constants_windows.go", + ], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go index da728133f7..808b94380a 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go +++ 
b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ import ( "time" "github.com/pkg/errors" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/version" bootstrapapi "k8s.io/cluster-bootstrap/token/api" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -143,7 +143,7 @@ const ( // the TLS bootstrap to get itself an unique credential KubeletBootstrapKubeConfigFileName = "bootstrap-kubelet.conf" - // KubeletKubeConfigFileName defines the file name for the kubeconfig that the master kubelet will use for talking + // KubeletKubeConfigFileName defines the file name for the kubeconfig that the control-plane kubelet will use for talking // to the API server KubeletKubeConfigFileName = "kubelet.conf" // ControllerManagerKubeConfigFileName defines the file name for the controller manager's kubeconfig file @@ -157,9 +157,9 @@ const ( ControllerManagerUser = "system:kube-controller-manager" // SchedulerUser defines the well-known user the scheduler should be authenticated as SchedulerUser = "system:kube-scheduler" - // MastersGroup defines the well-known group for the apiservers. This group is also superuser by default + // SystemPrivilegedGroup defines the well-known group for the apiservers. This group is also superuser by default // (i.e. bound to the cluster-admin ClusterRole) - MastersGroup = "system:masters" + SystemPrivilegedGroup = "system:masters" // NodesGroup defines the well-known group for all nodes. NodesGroup = "system:nodes" // NodesUserPrefix defines the user name prefix as requested by the Node authorizer. 
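// An illustrative sketch (hypothetical helper, not part of this patch) of how callers
// refer to the renamed constant above: the former constants.MastersGroup is now
// constants.SystemPrivilegedGroup, still resolving to "system:masters".
package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

// clusterAdminBindingForPrivilegedGroup binds cluster-admin to the privileged group.
func clusterAdminBindingForPrivilegedGroup() *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "example-privileged-group-binding"},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: []rbacv1.Subject{{
			Kind: rbacv1.GroupKind,
			Name: constants.SystemPrivilegedGroup,
		}},
	}
}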
@@ -171,9 +171,9 @@ const ( // APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation APICallRetryInterval = 500 * time.Millisecond - // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the master when doing discovery + // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the control-plane when doing discovery DiscoveryRetryInterval = 5 * time.Second - // PatchNodeTimeout specifies how long kubeadm should wait for applying the label and taint on the master before timing out + // PatchNodeTimeout specifies how long kubeadm should wait for applying the label and taint on the control-plane before timing out PatchNodeTimeout = 2 * time.Minute // UpdateNodeTimeout specifies how long kubeadm should wait for updating node with the initial remote configuration of kubelet before timing out UpdateNodeTimeout = 2 * time.Minute @@ -191,7 +191,14 @@ const ( // Default behaviour is 24 hours DefaultTokenDuration = 24 * time.Hour - // LabelNodeRoleMaster specifies that a node is a master + // DefaultCertTokenDuration specifies the default amount of time that the token used by upload certs will be valid + // Default behaviour is 2 hours + DefaultCertTokenDuration = 2 * time.Hour + + // CertificateKeySize specifies the size of the key used to encrypt certificates on uploadcerts phase + CertificateKeySize = 32 + + // LabelNodeRoleMaster specifies that a node is a control-plane // This is a duplicate definition of the constant in pkg/controller/service/service_controller.go LabelNodeRoleMaster = "node-role.kubernetes.io/master" @@ -250,7 +257,7 @@ const ( MinExternalEtcdVersion = "3.2.18" // DefaultEtcdVersion indicates the default etcd version that kubeadm uses - DefaultEtcdVersion = "3.2.24" + DefaultEtcdVersion = "3.3.10" // PauseVersion indicates the default pause image version for kubeadm PauseVersion = "3.1" @@ -332,7 +339,7 @@ const ( KubeDNSVersion = "1.14.13" // CoreDNSVersion is the version of CoreDNS to be deployed if it is used - CoreDNSVersion = "1.2.6" + CoreDNSVersion = "1.3.1" // ClusterConfigurationKind is the string kind value for the ClusterConfiguration struct ClusterConfigurationKind = "ClusterConfiguration" @@ -350,19 +357,22 @@ const ( // DefaultAPIServerBindAddress is the default bind address for the API Server DefaultAPIServerBindAddress = "0.0.0.0" - // MasterNumCPU is the number of CPUs required on master - MasterNumCPU = 2 + // ControlPlaneNumCPU is the number of CPUs required on control-plane + ControlPlaneNumCPU = 2 + + // KubeadmCertsSecret specifies in what Secret in the kube-system namespace the certificates should be stored + KubeadmCertsSecret = "kubeadm-certs" ) var ( - // MasterTaint is the taint to apply on the PodSpec for being able to run that Pod on the master - MasterTaint = v1.Taint{ + // ControlPlaneTaint is the taint to apply on the PodSpec for being able to run that Pod on the control-plane + ControlPlaneTaint = v1.Taint{ Key: LabelNodeRoleMaster, Effect: v1.TaintEffectNoSchedule, } - // MasterToleration is the toleration to apply on the PodSpec for being able to run that Pod on the master - MasterToleration = v1.Toleration{ + // ControlPlaneToleration is the toleration to apply on the PodSpec for being able to run that Pod on the control-plane + ControlPlaneToleration = v1.Toleration{ Key: LabelNodeRoleMaster, Effect: v1.TaintEffectNoSchedule, } @@ -373,22 +383,28 @@ var ( // DefaultTokenGroups specifies the default groups that this token 
will authenticate as when used for authentication DefaultTokenGroups = []string{NodeBootstrapTokenAuthGroup} - // MasterComponents defines the master component names - MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} + // ControlPlaneComponents defines the control-plane component names + ControlPlaneComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} // MinimumControlPlaneVersion specifies the minimum control plane version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.12.0") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.13.0") // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports - MinimumKubeletVersion = version.MustParseSemantic("v1.12.0") + MinimumKubeletVersion = version.MustParseSemantic("v1.13.0") + + // CurrentKubernetesVersion specifies current Kubernetes version supported by kubeadm + CurrentKubernetesVersion = version.MustParseSemantic("v1.14.0") // SupportedEtcdVersion lists officially supported etcd versions with corresponding Kubernetes releases SupportedEtcdVersion = map[uint8]string{ - 10: "3.1.12", - 11: "3.2.18", 12: "3.2.24", 13: "3.2.24", + 14: "3.3.10", } + + // KubeadmCertsClusterRoleName sets the name for the ClusterRole that allows + // the bootstrap tokens to access the kubeadm-certs Secret during the join of a new control-plane + KubeadmCertsClusterRoleName = fmt.Sprintf("kubeadm:%s", KubeadmCertsSecret) ) // EtcdSupportedVersion returns officially supported version of etcd for a specific Kubernetes release diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_unix.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_unix.go new file mode 100644 index 0000000000..16ff72d555 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constants + +const ( + // DefaultDockerCRISocket defines the default Docker CRI socket + DefaultDockerCRISocket = "/var/run/dockershim.sock" +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_windows.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_windows.go new file mode 100644 index 0000000000..4ee63a3ef4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants_windows.go @@ -0,0 +1,24 @@ +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package constants + +const ( + // DefaultDockerCRISocket defines the default Docker CRI socket + DefaultDockerCRISocket = "tcp://localhost:2375" +) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD index becafa093b..259e1d97ca 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/BUILD @@ -80,7 +80,9 @@ filegroup( ":package-srcs", "//cmd/kubeadm/app/util/apiclient:all-srcs", "//cmd/kubeadm/app/util/audit:all-srcs", + "//cmd/kubeadm/app/util/certs:all-srcs", "//cmd/kubeadm/app/util/config:all-srcs", + "//cmd/kubeadm/app/util/crypto:all-srcs", "//cmd/kubeadm/app/util/dryrun:all-srcs", "//cmd/kubeadm/app/util/etcd:all-srcs", "//cmd/kubeadm/app/util/kubeconfig:all-srcs", diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go index 619aba2eb4..afba856c42 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/arguments.go @@ -30,26 +30,26 @@ import ( func BuildArgumentListFromMap(baseArguments map[string]string, overrideArguments map[string]string) []string { var command []string var keys []string - for k := range overrideArguments { - keys = append(keys, k) + + argsMap := make(map[string]string) + + for k, v := range baseArguments { + argsMap[k] = v } - sort.Strings(keys) - for _, k := range keys { - v := overrideArguments[k] - // values of "" are allowed as well - command = append(command, fmt.Sprintf("--%s=%s", k, v)) + + for k, v := range overrideArguments { + argsMap[k] = v } - keys = []string{} - for k := range baseArguments { + + for k := range argsMap { keys = append(keys, k) } + sort.Strings(keys) for _, k := range keys { - v := baseArguments[k] - if _, overrideExists := overrideArguments[k]; !overrideExists { - command = append(command, fmt.Sprintf("--%s=%s", k, v)) - } + command = append(command, fmt.Sprintf("--%s=%s", k, argsMap[k])) } + return command } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go index 2d1e450689..602f6c3f6f 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/cgroupdriver.go @@ -24,6 +24,13 @@ import ( utilsexec "k8s.io/utils/exec" ) +const ( + // CgroupDriverSystemd holds the systemd driver type + CgroupDriverSystemd = "systemd" + // CgroupDriverCgroupfs holds the cgroupfs driver type + CgroupDriverCgroupfs = "cgroupfs" +) + // TODO: add support for detecting the cgroup driver for CRI other than // Docker. 
Currently only Docker driver detection is supported: // Discussion: @@ -39,7 +46,7 @@ func GetCgroupDriverDocker(execer utilsexec.Interface) (string, error) { } func validateCgroupDriver(driver string) error { - if driver != "cgroupfs" && driver != "systemd" { + if driver != CgroupDriverCgroupfs && driver != CgroupDriverSystemd { return errors.Errorf("unknown cgroup driver %q", driver) } return nil diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go index 8760aec6b0..790f020fd1 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/endpoint.go @@ -28,36 +28,36 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) -// GetMasterEndpoint returns a properly formatted endpoint for the control plane built according following rules: -// - If the ControlPlaneEndpoint is defined, use it. -// - if the ControlPlaneEndpoint is defined but without a port number, use the ControlPlaneEndpoint + api.BindPort is used. -// - Otherwise, in case the ControlPlaneEndpoint is not defined, use the api.AdvertiseAddress + the api.BindPort. -func GetMasterEndpoint(cfg *kubeadmapi.InitConfiguration) (string, error) { +// GetControlPlaneEndpoint returns a properly formatted endpoint for the control plane built according following rules: +// - If the controlPlaneEndpoint is defined, use it. +// - if the controlPlaneEndpoint is defined but without a port number, use the controlPlaneEndpoint + localEndpoint.BindPort is used. +// - Otherwise, in case the controlPlaneEndpoint is not defined, use the localEndpoint.AdvertiseAddress + the localEndpoint.BindPort. +func GetControlPlaneEndpoint(controlPlaneEndpoint string, localEndpoint *kubeadmapi.APIEndpoint) (string, error) { // parse the bind port - bindPortString := strconv.Itoa(int(cfg.LocalAPIEndpoint.BindPort)) + bindPortString := strconv.Itoa(int(localEndpoint.BindPort)) if _, err := ParsePort(bindPortString); err != nil { - return "", errors.Wrapf(err, "invalid value %q given for api.bindPort", cfg.LocalAPIEndpoint.BindPort) + return "", errors.Wrapf(err, "invalid value %q given for api.bindPort", localEndpoint.BindPort) } // parse the AdvertiseAddress - var ip = net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress) + var ip = net.ParseIP(localEndpoint.AdvertiseAddress) if ip == nil { - return "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", cfg.LocalAPIEndpoint.AdvertiseAddress) + return "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", localEndpoint.AdvertiseAddress) } - // set the master url using cfg.API.AdvertiseAddress + the cfg.API.BindPort - masterURL := &url.URL{ + // set the control-plane url using localEndpoint.AdvertiseAddress + the localEndpoint.BindPort + controlPlaneURL := &url.URL{ Scheme: "https", Host: net.JoinHostPort(ip.String(), bindPortString), } // if the controlplane endpoint is defined - if len(cfg.ControlPlaneEndpoint) > 0 { + if len(controlPlaneEndpoint) > 0 { // parse the controlplane endpoint var host, port string var err error - if host, port, err = ParseHostPort(cfg.ControlPlaneEndpoint); err != nil { - return "", errors.Wrapf(err, "invalid value %q given for controlPlaneEndpoint", cfg.ControlPlaneEndpoint) + if host, port, err = ParseHostPort(controlPlaneEndpoint); err != nil { + return "", errors.Wrapf(err, "invalid value %q given for controlPlaneEndpoint", controlPlaneEndpoint) } // if a port is provided within the 
controlPlaneAddress warn the users we are using it, else use the bindport @@ -69,14 +69,14 @@ func GetMasterEndpoint(cfg *kubeadmapi.InitConfiguration) (string, error) { port = bindPortString } - // overrides the master url using the controlPlaneAddress (and eventually the bindport) - masterURL = &url.URL{ + // overrides the control-plane url using the controlPlaneAddress (and eventually the bindport) + controlPlaneURL = &url.URL{ Scheme: "https", Host: net.JoinHostPort(host, port), } } - return masterURL.String(), nil + return controlPlaneURL.String(), nil } // ParseHostPort parses a network address of the form "host:port", "ipv4:port", "[ipv6]:port" into host and port; @@ -95,7 +95,7 @@ func ParseHostPort(hostport string) (string, string, error) { // if port is defined, parse and validate it if port != "" { if _, err := ParsePort(port); err != nil { - return "", "", errors.New("port must be a valid number between 1 and 65535, inclusive") + return "", "", errors.Errorf("hostport %s: port %s must be a valid number between 1 and 65535, inclusive", hostport, port) } } @@ -109,7 +109,7 @@ func ParseHostPort(hostport string) (string, string, error) { return host, port, nil } - return "", "", errors.New("host must be a valid IP address or a valid RFC-1123 DNS subdomain") + return "", "", errors.Errorf("hostport %s: host '%s' must be a valid IP address or a valid RFC-1123 DNS subdomain", hostport, host) } // ParsePort parses a string representing a TCP port. diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go index 4eaf984fd4..e026f71ebc 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/error.go @@ -21,7 +21,7 @@ import ( "os" "strings" - utilerrors "k8s.io/apimachinery/pkg/util/errors" + errorsutil "k8s.io/apimachinery/pkg/util/errors" ) const ( @@ -69,7 +69,7 @@ func checkErr(err error, handleErr func(string, int)) { return case preflightError: handleErr(err.Error(), PreFlightExitCode) - case utilerrors.Aggregate: + case errorsutil.Aggregate: handleErr(err.Error(), ValidationExitCode) default: diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go index d38dce75f2..5fca9950ad 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/marshal.go @@ -21,13 +21,13 @@ import ( "bytes" "io" - pkgerrors "github.com/pkg/errors" + "github.com/pkg/errors" "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/errors" + errorsutil "k8s.io/apimachinery/pkg/util/errors" utilyaml "k8s.io/apimachinery/pkg/util/yaml" clientsetscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -45,7 +45,7 @@ func MarshalToYamlForCodecs(obj runtime.Object, gv schema.GroupVersion, codecs s mediaType := "application/yaml" info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) if !ok { - return []byte{}, pkgerrors.Errorf("unsupported media type %q", mediaType) + return []byte{}, errors.Errorf("unsupported media type %q", mediaType) } encoder := codecs.EncoderForVersion(info.Serializer, gv) @@ -64,7 +64,7 @@ func UnmarshalFromYamlForCodecs(buffer []byte, gv schema.GroupVersion, codecs se mediaType := "application/yaml" info, ok := 
runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) if !ok { - return nil, pkgerrors.Errorf("unsupported media type %q", mediaType) + return nil, errors.Errorf("unsupported media type %q", mediaType) } decoder := codecs.DecoderToVersion(info.Serializer, gv) @@ -97,12 +97,12 @@ func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, e } // Require TypeMeta information to be present if len(typeMetaInfo.APIVersion) == 0 || len(typeMetaInfo.Kind) == 0 { - errs = append(errs, pkgerrors.New("invalid configuration: kind and apiVersion is mandatory information that needs to be specified in all YAML documents")) + errs = append(errs, errors.New("invalid configuration: kind and apiVersion is mandatory information that needs to be specified in all YAML documents")) continue } // Check whether the kind has been registered before. If it has, throw an error if known := knownKinds[typeMetaInfo.Kind]; known { - errs = append(errs, pkgerrors.Errorf("invalid configuration: kind %q is specified twice in YAML file", typeMetaInfo.Kind)) + errs = append(errs, errors.Errorf("invalid configuration: kind %q is specified twice in YAML file", typeMetaInfo.Kind)) continue } knownKinds[typeMetaInfo.Kind] = true @@ -110,7 +110,7 @@ func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, e // Build a GroupVersionKind object from the deserialized TypeMeta object gv, err := schema.ParseGroupVersion(typeMetaInfo.APIVersion) if err != nil { - errs = append(errs, pkgerrors.Wrap(err, "unable to parse apiVersion")) + errs = append(errs, errors.Wrap(err, "unable to parse apiVersion")) continue } gvk := gv.WithKind(typeMetaInfo.Kind) @@ -118,7 +118,7 @@ func SplitYAMLDocuments(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, e // Save the mapping between the gvk and the bytes that object consists of gvkmap[gvk] = b } - if err := errors.NewAggregate(errs); err != nil { + if err := errorsutil.NewAggregate(errs); err != nil { return nil, err } return gvkmap, nil diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go index 4da6ce48ae..198e6d9d6b 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/version.go @@ -17,7 +17,6 @@ limitations under the License. package util import ( - "errors" "fmt" "io/ioutil" "net/http" @@ -25,7 +24,8 @@ import ( "strings" "time" - pkgerrors "github.com/pkg/errors" + "github.com/pkg/errors" + netutil "k8s.io/apimachinery/pkg/util/net" versionutil "k8s.io/apimachinery/pkg/util/version" "k8s.io/klog" @@ -79,9 +79,11 @@ func KubernetesReleaseVersion(version string) (string, error) { // kubeReleaseLabelRegex matches labels such as: latest, latest-1, latest-1.10 if kubeReleaseLabelRegex.MatchString(versionLabel) { - var clientVersion string // Try to obtain a client version. - clientVersion, _ = kubeadmVersion(pkgversion.Get().String()) + // pkgversion.Get().String() should always return a correct version added by the golang + // linker and the build system. The version can still be missing when doing unit tests + // on individual packages. + clientVersion, clientVersionErr := kubeadmVersion(pkgversion.Get().String()) // Fetch version from the internet. 
url := fmt.Sprintf("%s/%s.txt", bucketURL, versionLabel) body, err := fetchFromURL(url, getReleaseVersionTimeout) @@ -95,6 +97,12 @@ func KubernetesReleaseVersion(version string) (string, error) { klog.Infof("falling back to the local client version: %s", clientVersion) return KubernetesReleaseVersion(clientVersion) } + + if clientVersionErr != nil { + klog.Warningf("could not obtain client version; using remote version: %s", body) + return KubernetesReleaseVersion(body) + } + // both the client and the remote version are obtained; validate them and pick a stable version body, err = validateStableVersion(body, clientVersion) if err != nil { @@ -103,7 +111,7 @@ func KubernetesReleaseVersion(version string) (string, error) { // Re-validate received version and return. return KubernetesReleaseVersion(body) } - return "", pkgerrors.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version) + return "", errors.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version) } // KubernetesVersionToImageTag is helper function that replaces all @@ -144,7 +152,7 @@ func splitVersion(version string) (string, string, error) { var urlSuffix string subs := kubeBucketPrefixes.FindAllStringSubmatch(version, 1) if len(subs) != 1 || len(subs[0]) != 4 { - return "", "", pkgerrors.Errorf("invalid version %q", version) + return "", "", errors.Errorf("invalid version %q", version) } switch { @@ -164,12 +172,12 @@ func fetchFromURL(url string, timeout time.Duration) (string, error) { client := &http.Client{Timeout: timeout, Transport: netutil.SetOldTransportDefaults(&http.Transport{})} resp, err := client.Get(url) if err != nil { - return "", pkgerrors.Errorf("unable to get URL %q: %s", url, err.Error()) + return "", errors.Errorf("unable to get URL %q: %s", url, err.Error()) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - return "", pkgerrors.Errorf("unable to read content of URL %q: %s", url, err.Error()) + return "", errors.Errorf("unable to read content of URL %q: %s", url, err.Error()) } bodyString := strings.TrimSpace(string(body)) @@ -184,7 +192,7 @@ func fetchFromURL(url string, timeout time.Duration) (string, error) { func kubeadmVersion(info string) (string, error) { v, err := versionutil.ParseSemantic(info) if err != nil { - return "", pkgerrors.Wrap(err, "kubeadm version error") + return "", errors.Wrap(err, "kubeadm version error") } // There is no utility in versionutil to get the version without the metadata, // so this needs some manual formatting. @@ -216,18 +224,13 @@ func kubeadmVersion(info string) (string, error) { // This is done to conform with "stable-X" and only allow remote versions from // the same Patch level release. 
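// An illustrative call site (hypothetical, not part of this patch) for the
// KubernetesReleaseVersion behaviour changed above: release labels are resolved
// remotely and validated against the client version when one is available; when
// the client version cannot be determined, the remote answer is now used as-is.
package main

import (
	"fmt"

	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)

func main() {
	// "stable-1.14" is a release label; explicit versions such as "v1.14.0"
	// pass through unchanged.
	resolved, err := kubeadmutil.KubernetesReleaseVersion("stable-1.14")
	if err != nil {
		fmt.Println("could not resolve version:", err)
		return
	}
	fmt.Println("resolved Kubernetes version:", resolved)
}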
func validateStableVersion(remoteVersion, clientVersion string) (string, error) { - if clientVersion == "" { - klog.Infof("could not obtain client version; using remote version: %s", remoteVersion) - return remoteVersion, nil - } - verRemote, err := versionutil.ParseGeneric(remoteVersion) if err != nil { - return "", pkgerrors.Wrap(err, "remote version error") + return "", errors.Wrap(err, "remote version error") } verClient, err := versionutil.ParseGeneric(clientVersion) if err != nil { - return "", pkgerrors.Wrap(err, "client version error") + return "", errors.Wrap(err, "client version error") } // If the remote Major version is bigger or if the Major versions are the same, // but the remote Minor is bigger use the client version release. This handles Major bumps too. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS index 7e195eca38..f062ebd6b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - erictune - lavalamp diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index bef73c0db0..688287611e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -101,4 +101,10 @@ const ( // This annotation will be used to compute the in-cluster network programming latency SLI, see // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. + // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index 251547f601..157d38ced5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -154,6 +154,9 @@ type VolumeSource struct { // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod // +optional StorageOS *StorageOSVolumeSource + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + CSI *CSIVolumeSource } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -229,7 +232,7 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource - // CSI (Container Storage Interface) represents storage that handled by an external CSI driver. + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver. 
// +optional CSI *CSIPersistentVolumeSource } @@ -918,6 +921,11 @@ type QuobyteVolumeSource struct { // Default is no group // +optional Group string + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + Tenant string } // Represents a Glusterfs mount that lasts the lifetime of a pod. @@ -1598,6 +1606,38 @@ type CSIPersistentVolumeSource struct { NodePublishSecretRef *SecretReference } +// Represents a source location of a volume to mount, managed by an external CSI driver +type CSIVolumeSource struct { + // Driver is the name of the CSI driver that handles this volume. + // Consult with your admin for the correct name as registered in the cluster. + // Required. + Driver string + + // Specifies a read-only configuration for the volume. + // Defaults to false (read/write). + // +optional + ReadOnly *bool + + // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // If not provided, the empty value is passed to the associated CSI driver + // which will determine the default filesystem to apply. + // +optional + FSType *string + + // VolumeAttributes stores driver-specific properties that are passed to the CSI + // driver. Consult your driver's documentation for supported values. + // +optional + VolumeAttributes map[string]string + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secret references are passed. + // +optional + NodePublishSecretRef *LocalObjectReference +} + // ContainerPort represents a network port in a single container type ContainerPort struct { // Optional: If specified, this must be an IANA_SVC_NAME Each named port @@ -1639,6 +1679,13 @@ type VolumeMount struct { // This field is beta in 1.10. // +optional MountPropagation *MountPropagationMode + // Expanded path within the volume from which the container's volume should be mounted. + // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // This field is alpha in 1.14. + // +optional + SubPathExpr string } // MountPropagationMode describes mount propagation. @@ -2023,8 +2070,15 @@ type Lifecycle struct { // is terminated and restarted. // +optional PostStart *Handler - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // PreStop is called immediately before a container is terminated due to an + // API request or management event such as liveness probe failure, + // preemption, resource contention, etc. The handler is not called if the + // container crashes or exits. The reason for termination is passed to the + // handler. The Pod's termination grace period countdown begins before the + // PreStop hooked is executed. Regardless of the outcome of the handler, the + // container will eventually terminate within the Pod's termination grace + // period. Other management of the container blocks until the hook completes + // or until the termination grace period is reached. 
// +optional PreStop *Handler } @@ -2610,14 +2664,14 @@ type PodSpec struct { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md // +optional ReadinessGates []PodReadinessGate // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index a4801c2e31..07347542b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -250,6 +250,44 @@ func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { + *out = *in + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. +func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { + if in == nil { + return nil + } + out := new(CSIVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -5368,6 +5406,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(StorageOSVolumeSource) (*in).DeepCopyInto(*out) } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(CSIVolumeSource) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS index 7eed1121ed..b8b2acd3b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS @@ -1,3 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - mtaufen reviewers: diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go index 941d837486..079cb19253 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go @@ -218,7 +218,7 @@ type KubeletConfiguration struct { // The CIDR to use for pod IP addresses, only used in standalone mode. // In cluster mode, this is obtained from the master. PodCIDR string - // PodPidsLimit is the maximum number of pids in any pod. + // The maximum number of processes per pod. If -1, the kubelet defaults to the node allocatable pid capacity. PodPidsLimit int64 // ResolverConfig is the resolver configuration file used as the basis // for the container DNS resolution configuration. @@ -291,12 +291,12 @@ type KubeletConfiguration struct { /* the following fields are meant for Node Allocatable */ - // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pids=100) pairs // that describe resources reserved for non-kubernetes components. // Currently only cpu and memory are supported. // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. SystemReserved map[string]string - // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G,pids=100) pairs // that describe resources reserved for kubernetes system components. // Currently cpu, memory and local ephemeral storage for root file system are supported. // See http://kubernetes.io/docs/user-guide/compute-resources for more detail. 
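For reviewers unfamiliar with the core API additions pulled in by this vendor bump (the inline CSIVolumeSource and the SubPathExpr mount field shown above), the following is a minimal, illustrative sketch of how those fields are typically populated through the versioned k8s.io/api/core/v1 types. It is not part of this patch; the driver name, image, and attribute keys are invented for the example.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
)

// buildPodSpec sketches a PodSpec that uses the two new fields together:
// an inline (pod-scoped) CSI volume and a SubPathExpr mount whose
// $(POD_NAME) reference is expanded from the container's environment.
func buildPodSpec() corev1.PodSpec {
	readOnly := true
	return corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "app",
			Image: "registry.example/app:latest", // hypothetical image
			Env: []corev1.EnvVar{{
				Name: "POD_NAME",
				ValueFrom: &corev1.EnvVarSource{
					FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
				},
			}},
			VolumeMounts: []corev1.VolumeMount{{
				Name:        "inline-csi",
				MountPath:   "/data",
				SubPathExpr: "$(POD_NAME)", // expanded per pod; mutually exclusive with SubPath
			}},
		}},
		Volumes: []corev1.Volume{{
			Name: "inline-csi",
			VolumeSource: corev1.VolumeSource{
				CSI: &corev1.CSIVolumeSource{
					Driver:   "csi.example.com", // hypothetical CSI driver name
					ReadOnly: &readOnly,
					VolumeAttributes: map[string]string{
						"share": "logs", // driver-specific, illustrative only
					},
				},
			},
		}},
	}
}
```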
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD index 475d4233b4..bc94d8d062 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/BUILD @@ -15,10 +15,10 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/proxy/apis/config", deps = [ - "//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/component-base/config:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS index 34823356d2..8b0c7feef3 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/OWNERS @@ -1,4 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + approvers: - thockin reviewers: - sig-network-reviewers +labels: +- sig/network diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go index dacee32bb3..30ca9de393 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/types.go @@ -21,8 +21,8 @@ import ( "sort" "strings" - apimachineryconfig "k8s.io/apimachinery/pkg/apis/config" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + componentbaseconfig "k8s.io/component-base/config" ) // KubeProxyIPTablesConfiguration contains iptables-related configuration @@ -55,6 +55,9 @@ type KubeProxyIPVSConfiguration struct { // excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch // when cleaning up ipvs services. ExcludeCIDRs []string + // strict ARP configures arp_ignore and arp_announce to avoid answering ARP queries + // from kube-ipvs0 interface + StrictARP bool } // KubeProxyConntrackConfiguration contains conntrack settings for @@ -78,6 +81,20 @@ type KubeProxyConntrackConfiguration struct { TCPCloseWaitTimeout *metav1.Duration } +// KubeProxyWinkernelConfiguration contains Windows/HNS settings for +// the Kubernetes proxy server. +type KubeProxyWinkernelConfiguration struct { + // networkName is the name of the network kube-proxy will use + // to create endpoints and policies + NetworkName string + // sourceVip is the IP address of the source VIP endpoint used for + // NAT when loadbalancing + SourceVip string + // enableDSR tells kube-proxy whether HNS policies should be created + // with DSR + EnableDSR bool +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KubeProxyConfiguration contains everything necessary to configure the @@ -108,7 +125,7 @@ type KubeProxyConfiguration struct { HostnameOverride string // clientConnection specifies the kubeconfig file and client connection settings for the proxy // server to use when communicating with the apiserver. - ClientConnection apimachineryconfig.ClientConnectionConfiguration + ClientConnection componentbaseconfig.ClientConnectionConfiguration // iptables contains iptables-related configuration options. IPTables KubeProxyIPTablesConfiguration // ipvs contains ipvs-related configuration options. 
@@ -140,6 +157,8 @@ type KubeProxyConfiguration struct { // If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. // An empty string slice is meant to select all network interfaces. NodePortAddresses []string + // winkernel contains winkernel-related configuration options. + Winkernel KubeProxyWinkernelConfiguration } // Currently, three modes of proxy are available in Linux platform: 'userspace' (older, going to be EOL), 'iptables' diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go index 983b5113b7..04a5dbfe7a 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/apis/config/zz_generated.deepcopy.go @@ -74,6 +74,7 @@ func (in *KubeProxyConfiguration) DeepCopyInto(out *KubeProxyConfiguration) { *out = make([]string, len(*in)) copy(*out, *in) } + out.Winkernel = in.Winkernel return } @@ -181,3 +182,19 @@ func (in *KubeProxyIPVSConfiguration) DeepCopy() *KubeProxyIPVSConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeProxyWinkernelConfiguration) DeepCopyInto(out *KubeProxyWinkernelConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxyWinkernelConfiguration. +func (in *KubeProxyWinkernelConfiguration) DeepCopy() *KubeProxyWinkernelConfiguration { + if in == nil { + return nil + } + out := new(KubeProxyWinkernelConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/util/buffer/ring_growing.go b/vendor/k8s.io/utils/buffer/ring_growing.go similarity index 100% rename from vendor/k8s.io/client-go/util/buffer/ring_growing.go rename to vendor/k8s.io/utils/buffer/ring_growing.go diff --git a/vendor/k8s.io/client-go/util/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go similarity index 81% rename from vendor/k8s.io/client-go/util/integer/integer.go rename to vendor/k8s.io/utils/integer/integer.go index c6ea106f9b..e4e740cad4 100644 --- a/vendor/k8s.io/client-go/util/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -16,6 +16,7 @@ limitations under the License. 
package integer +// IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { return b @@ -23,6 +24,7 @@ func IntMax(a, b int) int { return a } +// IntMin returns the minimum of the params func IntMin(a, b int) int { if b < a { return b @@ -30,6 +32,7 @@ func IntMin(a, b int) int { return a } +// Int32Max returns the maximum of the params func Int32Max(a, b int32) int32 { if b > a { return b @@ -37,6 +40,7 @@ func Int32Max(a, b int32) int32 { return a } +// Int32Min returns the minimum of the params func Int32Min(a, b int32) int32 { if b < a { return b @@ -44,6 +48,7 @@ func Int32Min(a, b int32) int32 { return a } +// Int64Max returns the maximum of the params func Int64Max(a, b int64) int64 { if b > a { return b @@ -51,6 +56,7 @@ func Int64Max(a, b int64) int64 { return a } +// Int64Min returns the minimum of the params func Int64Min(a, b int64) int64 { if b < a { return b diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 0000000000..0d6392752a --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go new file mode 100644 index 0000000000..a11a540f46 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -0,0 +1,86 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pointer + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64Ptr returns a pointer to an int64 +func Int64Ptr(i int64) *int64 { + return &i +} + +// Int32PtrDerefOr dereferences the int32 ptr and returns it if not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + return &b +} + +// StringPtr returns a pointer to the passed string. 
+func StringPtr(s string) *string { + return &s +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go new file mode 100644 index 0000000000..3a1ecfc715 --- /dev/null +++ b/vendor/k8s.io/utils/trace/trace.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "bytes" + "fmt" + "math/rand" + "time" + + "k8s.io/klog" +) + +type traceStep struct { + stepTime time.Time + msg string +} + +// Trace keeps track of a set of "steps" and allows us to log a specific +// step if it took longer than its share of the total allowed time +type Trace struct { + name string + startTime time.Time + steps []traceStep +} + +// New creates a Trace with the specified name +func New(name string) *Trace { + return &Trace{name, time.Now(), nil} +} + +// Step adds a new step with a specific message +func (t *Trace) Step(msg string) { + if t.steps == nil { + // traces almost always have less than 6 steps, do this to avoid more than a single allocation + t.steps = make([]traceStep, 0, 6) + } + t.steps = append(t.steps, traceStep{time.Now(), msg}) +} + +// Log is used to dump all the steps in the Trace +func (t *Trace) Log() { + // an explicit logging request should dump all the steps out at the higher level + t.logWithStepThreshold(0) +} + +func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { + var buffer bytes.Buffer + tracenum := rand.Int31() + endTime := time.Now() + + totalTime := endTime.Sub(t.startTime) + buffer.WriteString(fmt.Sprintf("Trace[%d]: %q (started: %v) (total time: %v):\n", tracenum, t.name, t.startTime, totalTime)) + lastStepTime := t.startTime + for _, step := range t.steps { + stepDuration := step.stepTime.Sub(lastStepTime) + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) + } + lastStepTime = step.stepTime + } + stepDuration := endTime.Sub(lastStepTime) + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] END\n", tracenum, endTime.Sub(t.startTime), stepDuration)) + } + + klog.Info(buffer.String()) +} + +// LogIfLong is used to dump steps that took longer than their share +func (t *Trace) LogIfLong(threshold time.Duration) { + if time.Since(t.startTime) >= threshold { + // if any step took more than its share of the total allowed time, it deserves a higher log level + stepThreshold := threshold / time.Duration(len(t.steps)+1) + t.logWithStepThreshold(stepThreshold) + } +} + +// TotalTime can be used to figure out how long it took since the Trace was created +func (t *Trace) TotalTime() time.Duration { + return time.Since(t.startTime) +} diff 
--git a/vendor/modules.txt b/vendor/modules.txt index 5080661ff3..b6f8c5a7fd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,205 +1,179 @@ -# cloud.google.com/go v0.35.1 -cloud.google.com/go/compute/metadata -# contrib.go.opencensus.io/exporter/ocagent v0.2.0 -contrib.go.opencensus.io/exporter/ocagent -# github.com/Azure/go-autorest v11.5.0+incompatible -github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/adal -github.com/Azure/go-autorest/autorest/azure -github.com/Azure/go-autorest/logger -github.com/Azure/go-autorest/tracing -github.com/Azure/go-autorest/autorest/date # github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 github.com/ajeddeloh/go-json # github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd github.com/ajeddeloh/yaml # github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf github.com/alecthomas/units -# github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 -github.com/appscode/jsonpatch # github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/beorn7/perks/quantile -# github.com/census-instrumentation/opencensus-proto v0.1.0 -github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 # github.com/coreos/container-linux-config-transpiler v0.9.0 github.com/coreos/container-linux-config-transpiler/config github.com/coreos/container-linux-config-transpiler/config/astyaml github.com/coreos/container-linux-config-transpiler/config/platform -github.com/coreos/container-linux-config-transpiler/config/types github.com/coreos/container-linux-config-transpiler/config/templating +github.com/coreos/container-linux-config-transpiler/config/types github.com/coreos/container-linux-config-transpiler/config/types/util github.com/coreos/container-linux-config-transpiler/internal/util -# github.com/coreos/go-semver v0.2.0 +# github.com/coreos/go-semver v0.3.0 github.com/coreos/go-semver/semver -# github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d +# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f github.com/coreos/go-systemd/unit # github.com/coreos/ignition v0.33.0 +github.com/coreos/ignition/config/shared/errors +github.com/coreos/ignition/config/shared/validations github.com/coreos/ignition/config/v2_2/types github.com/coreos/ignition/config/validate +github.com/coreos/ignition/config/validate/astjson github.com/coreos/ignition/config/validate/astnode github.com/coreos/ignition/config/validate/report -github.com/coreos/ignition/config/shared/errors -github.com/coreos/ignition/config/shared/validations -github.com/coreos/ignition/config/validate/astjson # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/dgrijalva/jwt-go v3.2.0+incompatible -github.com/dgrijalva/jwt-go -# github.com/ghodss/yaml v1.0.0 -github.com/ghodss/yaml +# github.com/evanphx/json-patch v4.5.0+incompatible +github.com/evanphx/json-patch +# github.com/fatih/color v1.7.0 +github.com/fatih/color # github.com/go-logr/logr v0.1.0 github.com/go-logr/logr # github.com/go-logr/zapr v0.1.0 github.com/go-logr/zapr -# github.com/gobuffalo/envy v1.6.12 -github.com/gobuffalo/envy -# github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2 +# github.com/gobuffalo/flect v0.1.5 github.com/gobuffalo/flect -# github.com/gogo/protobuf v1.2.0 +# 
github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff +# github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.2.0 +# github.com/golang/protobuf v1.3.1 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -github.com/golang/protobuf/ptypes/wrappers -# github.com/google/btree v1.0.0 -github.com/google/btree -# github.com/google/gofuzz v1.0.0 +# github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/google/gofuzz -# github.com/google/uuid v1.1.1 -github.com/google/uuid # github.com/googleapis/gnostic v0.2.0 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions # github.com/gophercloud/gophercloud v0.3.0 github.com/gophercloud/gophercloud +github.com/gophercloud/gophercloud/internal github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/common/extensions github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs +github.com/gophercloud/gophercloud/openstack/compute/v2/flavors +github.com/gophercloud/gophercloud/openstack/compute/v2/images github.com/gophercloud/gophercloud/openstack/compute/v2/servers +github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/openstack/identity/v2/tokens github.com/gophercloud/gophercloud/openstack/identity/v3/tokens github.com/gophercloud/gophercloud/openstack/imageservice/v2/images -github.com/gophercloud/gophercloud/openstack/networking/v2/extensions -github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags -github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups -github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks -github.com/gophercloud/gophercloud/openstack/networking/v2/networks -github.com/gophercloud/gophercloud/openstack/networking/v2/ports -github.com/gophercloud/gophercloud/openstack/networking/v2/subnets -github.com/gophercloud/gophercloud/pagination +github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools +github.com/gophercloud/gophercloud/openstack/networking/v2/extensions +github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers +github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules -github.com/gophercloud/gophercloud/openstack/identity/v2/tokens 
+github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks +github.com/gophercloud/gophercloud/openstack/networking/v2/networks +github.com/gophercloud/gophercloud/openstack/networking/v2/ports +github.com/gophercloud/gophercloud/openstack/networking/v2/subnets github.com/gophercloud/gophercloud/openstack/utils -github.com/gophercloud/gophercloud/openstack/compute/v2/flavors -github.com/gophercloud/gophercloud/openstack/compute/v2/images -github.com/gophercloud/gophercloud/internal -github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies -github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/pagination # github.com/gophercloud/utils v0.0.0-20190527093828-25f1b77b8c03 github.com/gophercloud/utils/openstack/clientconfig -# github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f -github.com/gregjones/httpcache -github.com/gregjones/httpcache/diskcache # github.com/hashicorp/golang-lru v0.5.0 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/imdario/mergo v0.3.7 +# github.com/hpcloud/tail v1.0.0 +github.com/hpcloud/tail +github.com/hpcloud/tail/ratelimiter +github.com/hpcloud/tail/util +github.com/hpcloud/tail/watch +github.com/hpcloud/tail/winfile +# github.com/imdario/mergo v0.3.6 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 github.com/inconshreveable/mousetrap -# github.com/joho/godotenv v1.3.0 -github.com/joho/godotenv # github.com/json-iterator/go v1.1.6 github.com/json-iterator/go +# github.com/mattn/go-colorable v0.1.2 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.8 +github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 +# github.com/onsi/ginkgo v1.8.0 +github.com/onsi/ginkgo +github.com/onsi/ginkgo/config +github.com/onsi/ginkgo/internal/codelocation +github.com/onsi/ginkgo/internal/containernode +github.com/onsi/ginkgo/internal/failer +github.com/onsi/ginkgo/internal/leafnodes +github.com/onsi/ginkgo/internal/remote +github.com/onsi/ginkgo/internal/spec +github.com/onsi/ginkgo/internal/spec_iterator +github.com/onsi/ginkgo/internal/specrunner +github.com/onsi/ginkgo/internal/suite +github.com/onsi/ginkgo/internal/testingtproxy +github.com/onsi/ginkgo/internal/writer +github.com/onsi/ginkgo/reporters +github.com/onsi/ginkgo/reporters/stenographer +github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable +github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty +github.com/onsi/ginkgo/types # github.com/onsi/gomega v1.5.0 +github.com/onsi/gomega +github.com/onsi/gomega/format github.com/onsi/gomega/gbytes github.com/onsi/gomega/gexec -github.com/onsi/gomega/format -github.com/onsi/gomega github.com/onsi/gomega/internal/assertion github.com/onsi/gomega/internal/asyncassertion +github.com/onsi/gomega/internal/oraclematcher github.com/onsi/gomega/internal/testingtsupport github.com/onsi/gomega/matchers -github.com/onsi/gomega/types -github.com/onsi/gomega/internal/oraclematcher github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util -# github.com/pborman/uuid v1.2.0 
+github.com/onsi/gomega/types +# github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c github.com/pborman/uuid -# github.com/peterbourgon/diskv v2.0.1+incompatible -github.com/peterbourgon/diskv # github.com/pkg/errors v0.8.1 github.com/pkg/errors -# github.com/prometheus/client_golang v0.9.2 -github.com/prometheus/client_golang/prometheus/promhttp +# github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal +github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f github.com/prometheus/client_model/go -# github.com/prometheus/common v0.1.0 +# github.com/prometheus/common v0.2.0 github.com/prometheus/common/expfmt -github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg +github.com/prometheus/common/model # github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 github.com/prometheus/procfs +github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs -github.com/prometheus/procfs/internal/util -# github.com/rogpeppe/go-internal v1.1.0 -github.com/rogpeppe/go-internal/modfile -github.com/rogpeppe/go-internal/module -github.com/rogpeppe/go-internal/semver -# github.com/spf13/afero v1.2.2 -github.com/spf13/afero -github.com/spf13/afero/mem # github.com/spf13/cobra v0.0.3 github.com/spf13/cobra # github.com/spf13/pflag v1.0.3 github.com/spf13/pflag # github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb github.com/vincent-petithory/dataurl -# go.opencensus.io v0.18.0 -go.opencensus.io/plugin/ochttp -go.opencensus.io/plugin/ochttp/propagation/tracecontext -go.opencensus.io/stats/view -go.opencensus.io/trace -go.opencensus.io -go.opencensus.io/trace/tracestate -go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io/stats -go.opencensus.io/tag -go.opencensus.io/trace/propagation -go.opencensus.io/exemplar -go.opencensus.io/internal/tagencoding -go.opencensus.io/stats/internal -go.opencensus.io/internal -go.opencensus.io/trace/internal # go.uber.org/atomic v1.3.2 go.uber.org/atomic # go.uber.org/multierr v1.1.0 @@ -207,129 +181,86 @@ go.uber.org/multierr # go.uber.org/zap v1.9.1 go.uber.org/zap go.uber.org/zap/buffer -go.uber.org/zap/zapcore go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit -# go4.org v0.0.0-20180809161055-417644f6feb5 +go.uber.org/zap/zapcore +# go4.org v0.0.0-20190313082347-94abd6928b1d go4.org/errorutil # golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 -golang.org/x/net/http2 -golang.org/x/net/http/httpguts -golang.org/x/net/http2/hpack -golang.org/x/net/idna -golang.org/x/net/context/ctxhttp golang.org/x/net/context -golang.org/x/net/html/charset +golang.org/x/net/context/ctxhttp golang.org/x/net/html golang.org/x/net/html/atom -golang.org/x/net/trace -golang.org/x/net/internal/timeseries -# golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c +golang.org/x/net/html/charset +golang.org/x/net/http/httpguts +golang.org/x/net/http2 +golang.org/x/net/http2/hpack +golang.org/x/net/idna +# golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 golang.org/x/oauth2 golang.org/x/oauth2/internal -golang.org/x/oauth2/google -golang.org/x/oauth2/jws -golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 
-golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f +# golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2 -golang.org/x/text/transform -golang.org/x/text/unicode/norm -golang.org/x/text/secure/bidirule -golang.org/x/text/unicode/bidi golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex -golang.org/x/text/encoding/internal/identifier golang.org/x/text/encoding/internal +golang.org/x/text/encoding/internal/identifier golang.org/x/text/encoding/japanese golang.org/x/text/encoding/korean golang.org/x/text/encoding/simplifiedchinese golang.org/x/text/encoding/traditionalchinese golang.org/x/text/encoding/unicode -golang.org/x/text/language -golang.org/x/text/internal/utf8internal -golang.org/x/text/runes golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag -# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c +golang.org/x/text/internal/utf8internal +golang.org/x/text/language +golang.org/x/text/runes +golang.org/x/text/secure/bidirule +golang.org/x/text/transform +golang.org/x/text/unicode/bidi +golang.org/x/text/unicode/norm +# golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b -golang.org/x/tools/imports +# golang.org/x/tools v0.0.0-20190501045030-23463209683d golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages +golang.org/x/tools/imports +golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/module -golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/cgo -golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/internal/semver -golang.org/x/tools/internal/fastwalk -golang.org/x/tools/go/internal/gcimporter -# google.golang.org/api v0.1.0 -google.golang.org/api/support/bundler +# gomodules.xyz/jsonpatch/v2 v2.0.0 => gomodules.xyz/jsonpatch/v2 v2.0.0-20190626003512-87910169748d +gomodules.xyz/jsonpatch/v2 # google.golang.org/appengine v1.4.0 -google.golang.org/appengine/urlfetch -google.golang.org/appengine google.golang.org/appengine/internal -google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 -google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.18.0 -google.golang.org/grpc -google.golang.org/grpc/balancer -google.golang.org/grpc/balancer/roundrobin -google.golang.org/grpc/codes -google.golang.org/grpc/connectivity -google.golang.org/grpc/credentials -google.golang.org/grpc/encoding -google.golang.org/grpc/encoding/proto -google.golang.org/grpc/grpclog -google.golang.org/grpc/internal -google.golang.org/grpc/internal/backoff -google.golang.org/grpc/internal/binarylog -google.golang.org/grpc/internal/channelz -google.golang.org/grpc/internal/envconfig -google.golang.org/grpc/internal/grpcrand -google.golang.org/grpc/internal/grpcsync -google.golang.org/grpc/internal/transport -google.golang.org/grpc/keepalive -google.golang.org/grpc/metadata 
-google.golang.org/grpc/naming -google.golang.org/grpc/peer -google.golang.org/grpc/resolver -google.golang.org/grpc/resolver/dns -google.golang.org/grpc/resolver/passthrough -google.golang.org/grpc/stats -google.golang.org/grpc/status -google.golang.org/grpc/tap -google.golang.org/grpc/balancer/base -google.golang.org/grpc/credentials/internal -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/internal/syscall +google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/urlfetch +# gopkg.in/fsnotify.v1 v1.4.7 +gopkg.in/fsnotify.v1 # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 +# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 +gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20190814101207-0772a1bdf941 => k8s.io/api v0.0.0-20190222213804-5cb15d344471 -k8s.io/api/core/v1 -k8s.io/api/autoscaling/v1 -k8s.io/api/apps/v1 -k8s.io/api/admissionregistration/v1alpha1 +# k8s.io/api v0.0.0-20190711103429-37c3b8b1ca65 => k8s.io/api v0.0.0-20190704095032-f4ca3d3bdf1d +k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/auditregistration/v1alpha1 @@ -337,84 +268,91 @@ k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 +k8s.io/api/autoscaling/v1 k8s.io/api/autoscaling/v2beta1 k8s.io/api/autoscaling/v2beta2 k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 +k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 -k8s.io/api/policy/v1beta1 +k8s.io/api/core/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 +k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1alpha1 +k8s.io/api/node/v1beta1 +k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 +k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -k8s.io/api/admission/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 -k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +# k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions -# k8s.io/apimachinery v0.0.0-20190814100815-533d101be9a6 => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 -k8s.io/apimachinery/pkg/util/wait -k8s.io/apimachinery/pkg/runtime -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/runtime/schema -k8s.io/apimachinery/pkg/util/json +k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +# k8s.io/apimachinery v0.0.0-20190711103026-7bf792636534 => k8s.io/apimachinery v0.0.0-20190704094733-8f6ac2502e51 +k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/types -k8s.io/apimachinery/pkg/runtime/serializer/json -k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/util/errors -k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/api/meta -k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/api/resource 
+k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1/validation +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams -k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/util/sets -k8s.io/apimachinery/pkg/api/resource -k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/fields +k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection -k8s.io/apimachinery/pkg/watch -k8s.io/apimachinery/pkg/apis/meta/v1/validation -k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/clock -k8s.io/apimachinery/pkg/util/strategicpatch -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer -k8s.io/apimachinery/pkg/util/yaml -k8s.io/apimachinery/pkg/runtime/serializer/protobuf -k8s.io/apimachinery/pkg/runtime/serializer/versioning +k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/mergepatch +k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/pkg/util/uuid +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/version +k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version -k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/pkg/apis/meta/internalversion -k8s.io/apimachinery/pkg/util/uuid -k8s.io/apimachinery/third_party/forked/golang/reflect -k8s.io/apimachinery/pkg/util/mergepatch +k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json -k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/diff -k8s.io/apimachinery/pkg/apis/config -# k8s.io/client-go v10.0.0+incompatible -k8s.io/client-go/kubernetes -k8s.io/client-go/tools/clientcmd -k8s.io/client-go/tools/record -k8s.io/client-go/tools/clientcmd/api +k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible k8s.io/client-go/discovery -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 +k8s.io/client-go/dynamic +k8s.io/client-go/kubernetes +k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -431,182 +369,177 @@ k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/batch/v2alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 +k8s.io/client-go/kubernetes/typed/coordination/v1 
k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/core/v1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1 +k8s.io/client-go/kubernetes/typed/networking/v1beta1 +k8s.io/client-go/kubernetes/typed/node/v1alpha1 +k8s.io/client-go/kubernetes/typed/node/v1beta1 k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/pkg/version +k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest -k8s.io/client-go/util/flowcontrol +k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper k8s.io/client-go/tools/auth +k8s.io/client-go/tools/cache +k8s.io/client-go/tools/clientcmd +k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest -k8s.io/client-go/util/homedir -k8s.io/client-go/tools/pager -k8s.io/client-go/util/retry -k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/leaderelection k8s.io/client-go/tools/leaderelection/resourcelock -k8s.io/client-go/tools/reference -k8s.io/client-go/dynamic -k8s.io/client-go/plugin/pkg/client/auth -k8s.io/client-go/pkg/version -k8s.io/client-go/plugin/pkg/client/auth/exec -k8s.io/client-go/rest/watch k8s.io/client-go/tools/metrics +k8s.io/client-go/tools/pager +k8s.io/client-go/tools/record +k8s.io/client-go/tools/record/util +k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert -k8s.io/client-go/util/integer -k8s.io/client-go/tools/clientcmd/api/v1 -k8s.io/client-go/util/workqueue -k8s.io/client-go/tools/cache -k8s.io/client-go/restmapper -k8s.io/client-go/plugin/pkg/client/auth/azure -k8s.io/client-go/plugin/pkg/client/auth/gcp -k8s.io/client-go/plugin/pkg/client/auth/oidc -k8s.io/client-go/plugin/pkg/client/auth/openstack -k8s.io/client-go/pkg/apis/clientauthentication -k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 -k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/util/connrotation -k8s.io/client-go/util/buffer -k8s.io/client-go/util/jsonpath -k8s.io/client-go/third_party/forked/golang/template -# k8s.io/cluster-bootstrap v0.0.0-20190814110523-aeed25068f87 +k8s.io/client-go/util/flowcontrol +k8s.io/client-go/util/homedir +k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry +k8s.io/client-go/util/workqueue +# k8s.io/cluster-bootstrap v0.0.0-20190711112844-b7409fb13d1b k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/util -# k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b +# k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 k8s.io/code-generator/cmd/client-gen -k8s.io/code-generator/cmd/conversion-gen -k8s.io/code-generator/cmd/deepcopy-gen -k8s.io/code-generator/cmd/defaulter-gen -k8s.io/code-generator/cmd/informer-gen -k8s.io/code-generator/cmd/lister-gen 
-k8s.io/code-generator/cmd/register-gen -k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators -k8s.io/code-generator/pkg/util +k8s.io/code-generator/cmd/client-gen/generators/fake +k8s.io/code-generator/cmd/client-gen/generators/scheme +k8s.io/code-generator/cmd/client-gen/generators/util +k8s.io/code-generator/cmd/client-gen/path +k8s.io/code-generator/cmd/client-gen/types +k8s.io/code-generator/cmd/conversion-gen k8s.io/code-generator/cmd/conversion-gen/args k8s.io/code-generator/cmd/conversion-gen/generators +k8s.io/code-generator/cmd/deepcopy-gen k8s.io/code-generator/cmd/deepcopy-gen/args +k8s.io/code-generator/cmd/defaulter-gen k8s.io/code-generator/cmd/defaulter-gen/args +k8s.io/code-generator/cmd/informer-gen k8s.io/code-generator/cmd/informer-gen/args k8s.io/code-generator/cmd/informer-gen/generators +k8s.io/code-generator/cmd/lister-gen k8s.io/code-generator/cmd/lister-gen/args k8s.io/code-generator/cmd/lister-gen/generators +k8s.io/code-generator/cmd/register-gen k8s.io/code-generator/cmd/register-gen/args k8s.io/code-generator/cmd/register-gen/generators -k8s.io/code-generator/cmd/client-gen/types -k8s.io/code-generator/cmd/client-gen/generators/fake -k8s.io/code-generator/cmd/client-gen/generators/scheme -k8s.io/code-generator/cmd/client-gen/generators/util -k8s.io/code-generator/cmd/client-gen/path -# k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d -k8s.io/component-base/cli/flag -# k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af +k8s.io/code-generator/cmd/set-gen +k8s.io/code-generator/pkg/namer +k8s.io/code-generator/pkg/util +# k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7 +k8s.io/component-base/config +# k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9 k8s.io/gengo/args k8s.io/gengo/examples/deepcopy-gen/generators k8s.io/gengo/examples/defaulter-gen/generators k8s.io/gengo/examples/set-gen/generators +k8s.io/gengo/examples/set-gen/sets k8s.io/gengo/generator k8s.io/gengo/namer -k8s.io/gengo/types k8s.io/gengo/parser -k8s.io/gengo/examples/set-gen/sets +k8s.io/gengo/types # k8s.io/klog v0.4.0 k8s.io/klog -# k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 +k8s.io/klog/klogr +# k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kubernetes v1.13.4 -k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 -k8s.io/kubernetes/cmd/kubeadm/app/util +# k8s.io/kubernetes v1.14.2 k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm +k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 k8s.io/kubernetes/cmd/kubeadm/app/constants -k8s.io/kubernetes/pkg/version +k8s.io/kubernetes/cmd/kubeadm/app/util +k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/kubelet/apis/config k8s.io/kubernetes/pkg/proxy/apis/config -k8s.io/kubernetes/pkg/registry/core/service/ipallocator -k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/registry/core/service/allocator -# k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a +k8s.io/kubernetes/pkg/registry/core/service/ipallocator +k8s.io/kubernetes/pkg/version +# k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 +k8s.io/utils/buffer k8s.io/utils/exec -# sigs.k8s.io/cluster-api v0.1.9 -sigs.k8s.io/cluster-api/cmd/clusterctl/cmd -sigs.k8s.io/cluster-api/pkg/apis/cluster/common -sigs.k8s.io/cluster-api/pkg/apis -sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset -sigs.k8s.io/cluster-api/pkg/controller/cluster -sigs.k8s.io/cluster-api/pkg/controller/machine -sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 
-sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1 -sigs.k8s.io/cluster-api/pkg/controller/remote +k8s.io/utils/integer +k8s.io/utils/pointer +k8s.io/utils/trace +# sigs.k8s.io/cluster-api v0.1.9 => sigs.k8s.io/cluster-api v0.0.0-20190813192342-65800b3b20e8 +sigs.k8s.io/cluster-api/api/v1alpha2 +sigs.k8s.io/cluster-api/pkg/controller/noderefutil sigs.k8s.io/cluster-api/pkg/errors sigs.k8s.io/cluster-api/pkg/util -sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider -sigs.k8s.io/cluster-api/cmd/clusterctl/phases -sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents -sigs.k8s.io/cluster-api/cmd/clusterctl/validation -sigs.k8s.io/cluster-api/pkg/controller/error -sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/existing -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind -sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube -sigs.k8s.io/cluster-api/pkg/controller/noderefutil -# sigs.k8s.io/controller-runtime v0.1.12 -sigs.k8s.io/controller-runtime/pkg/manager -sigs.k8s.io/controller-runtime/pkg/runtime/signals -sigs.k8s.io/controller-runtime/pkg/runtime/scheme +# sigs.k8s.io/controller-runtime v0.2.0-rc.0 +sigs.k8s.io/controller-runtime +sigs.k8s.io/controller-runtime/pkg/builder +sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/cache/internal sigs.k8s.io/controller-runtime/pkg/client -sigs.k8s.io/controller-runtime/pkg/patch +sigs.k8s.io/controller-runtime/pkg/client/apiutil sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/controller +sigs.k8s.io/controller-runtime/pkg/controller/controllerutil +sigs.k8s.io/controller-runtime/pkg/conversion +sigs.k8s.io/controller-runtime/pkg/envtest +sigs.k8s.io/controller-runtime/pkg/envtest/printer +sigs.k8s.io/controller-runtime/pkg/event sigs.k8s.io/controller-runtime/pkg/handler -sigs.k8s.io/controller-runtime/pkg/reconcile -sigs.k8s.io/controller-runtime/pkg/source -sigs.k8s.io/controller-runtime/pkg/cache -sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/internal/controller +sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics +sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/leaderelection +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/log/zap +sigs.k8s.io/controller-runtime/pkg/manager +sigs.k8s.io/controller-runtime/pkg/manager/signals sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder sigs.k8s.io/controller-runtime/pkg/runtime/inject -sigs.k8s.io/controller-runtime/pkg/runtime/log -sigs.k8s.io/controller-runtime/pkg/webhook/admission -sigs.k8s.io/controller-runtime/pkg/webhook/admission/types -sigs.k8s.io/controller-runtime/pkg/internal/controller -sigs.k8s.io/controller-runtime/pkg/predicate -sigs.k8s.io/controller-runtime/pkg/event +sigs.k8s.io/controller-runtime/pkg/runtime/scheme +sigs.k8s.io/controller-runtime/pkg/scheme 
+sigs.k8s.io/controller-runtime/pkg/source sigs.k8s.io/controller-runtime/pkg/source/internal -sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/webhook +sigs.k8s.io/controller-runtime/pkg/webhook/admission +sigs.k8s.io/controller-runtime/pkg/webhook/conversion +sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -sigs.k8s.io/controller-runtime/pkg/webhook/types -sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics -sigs.k8s.io/controller-runtime/pkg/internal/objectutil -# sigs.k8s.io/controller-tools v0.1.12 +# sigs.k8s.io/controller-tools v0.2.0-rc.0 sigs.k8s.io/controller-tools/cmd/controller-gen -sigs.k8s.io/controller-tools/pkg/crd/generator +sigs.k8s.io/controller-tools/pkg/crd +sigs.k8s.io/controller-tools/pkg/crd/markers +sigs.k8s.io/controller-tools/pkg/deepcopy +sigs.k8s.io/controller-tools/pkg/genall +sigs.k8s.io/controller-tools/pkg/genall/help +sigs.k8s.io/controller-tools/pkg/genall/help/pretty +sigs.k8s.io/controller-tools/pkg/loader +sigs.k8s.io/controller-tools/pkg/markers sigs.k8s.io/controller-tools/pkg/rbac sigs.k8s.io/controller-tools/pkg/webhook -sigs.k8s.io/controller-tools/pkg/crd/util -sigs.k8s.io/controller-tools/pkg/internal/codegen -sigs.k8s.io/controller-tools/pkg/internal/codegen/parse -sigs.k8s.io/controller-tools/pkg/util -sigs.k8s.io/controller-tools/pkg/internal/general -# sigs.k8s.io/testing_frameworks v0.1.1 +# sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 sigs.k8s.io/testing_frameworks/integration sigs.k8s.io/testing_frameworks/integration/addr sigs.k8s.io/testing_frameworks/integration/internal diff --git a/vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..ab086f02a8 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,25 @@ +--- +name: Bug report +about: Tell us about a problem you are experiencing + +--- + +/kind bug + +**What steps did you take and what happened:** +[A clear and concise description of what the bug is.] + + +**What did you expect to happen:** + + +**Anything else you would like to add:** +[Miscellaneous information that will assist in solving the issue.] + + +**Environment:** + +- Cluster-api version: +- Minikube/KIND version: +- Kubernetes version: (use `kubectl version`): +- OS (e.g. from `/etc/os-release`): diff --git a/vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..ec7c001bdc --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,14 @@ +--- +name: Feature enhancement request +about: Suggest an idea for this project + +--- + +/kind feature + +**Describe the solution you'd like** +[A clear and concise description of what you want to happen.] + + +**Anything else you would like to add:** +[Miscellaneous information that will assist in solving the issue.] 
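Two of the small utility packages vendored earlier in this patch, k8s.io/utils/pointer and k8s.io/utils/trace, replace the old client-go helpers dropped from the module list above. A brief, illustrative sketch of how they are commonly used follows; the package calls (pointer.Int32Ptr, pointer.BoolPtr, trace.New, Step, LogIfLong) come from the vendored sources, while the surrounding helper function is hypothetical.

```go
package example

import (
	"time"

	"k8s.io/utils/pointer"
	"k8s.io/utils/trace"
)

// reconcileSomething is a hypothetical helper that shows the two vendored
// utility packages together: pointer.* builds *int32/*bool literals for
// API fields, and trace.Trace records how long each step took, logging
// slow reconciles via LogIfLong.
func reconcileSomething() {
	t := trace.New("reconcileSomething")
	defer t.LogIfLong(250 * time.Millisecond) // only logs when the total exceeds the threshold

	replicas := pointer.Int32Ptr(3)   // *int32 literal, e.g. for a replica count field
	readOnly := pointer.BoolPtr(true) // *bool literal
	_ = replicas
	_ = readOnly
	t.Step("built desired spec")

	// ... fetch and update objects here ...
	t.Step("updated objects")
}
```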
diff --git a/vendor/sigs.k8s.io/cluster-api/.github/PULL_REQUEST_TEMPLATE.md b/vendor/sigs.k8s.io/cluster-api/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..c841aa48b8 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,25 @@ + + +**What this PR does / why we need it**: + +**Which issue(s) this PR fixes** *(optional, in `fixes #(, fixes #, ...)` format, will close the issue(s) when PR gets merged)*: +Fixes # + +**Special notes for your reviewer**: + +_Please confirm that if this PR changes any image versions, then that's the sole change this PR makes._ + +**Release note**: + +```release-note + +``` diff --git a/vendor/sigs.k8s.io/cluster-api/.gitignore b/vendor/sigs.k8s.io/cluster-api/.gitignore new file mode 100644 index 0000000000..b334b4a2fa --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/.gitignore @@ -0,0 +1,48 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +cmd/clusterctl/clusterctl +cmd/manager/manager +bin + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# IntelliJ +.idea/ +*.iml + +# Bazel symlinks +bazel-bin +bazel-cluster-api +bazel-genfiles +bazel-out +bazel-testlogs + +# kubeconfigs +minikube.kubeconfig + +# GitBook - do not check in node_modules for a specific arch and os. +docs/book/node_modules/ +docs/book/_book/ + +# Common editor / temporary files +*~ +*.tmp + +# rbac and manager config for example provider +config/ci/rbac/role_binding.yaml +config/ci/rbac/role.yaml +config/ci/manager/manager.yaml +config/crds-v1alpha1 + +# Sample config files auto-generated by kubebuilder +config/samples diff --git a/vendor/sigs.k8s.io/cluster-api/.golangci.yml b/vendor/sigs.k8s.io/cluster-api/.golangci.yml new file mode 100644 index 0000000000..58e0451508 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/.golangci.yml @@ -0,0 +1,26 @@ +linters: + enable: + - govet + - golint + - gofmt + - goimports + - structcheck + - varcheck + - interfacer + - unconvert + - ineffassign + - goconst + - gocyclo + - misspell + - nakedret + - prealloc + - gosec + disable-all: true + # Run with --fast=false for more extensive checks + fast: true +issue: + max-same-issues: 0 + max-per-linter: 0 +run: + skip-files: + - ".*\\.deepcopy\\.go$" diff --git a/vendor/sigs.k8s.io/cluster-api/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/BUILD.bazel index be252e22dc..e0ee8b50a2 100644 --- a/vendor/sigs.k8s.io/cluster-api/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/BUILD.bazel @@ -1,8 +1,10 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") load("@bazel_gazelle//:def.bzl", "gazelle") load("//build:run_in_workspace_with_goroot.bzl", "workspace_binary") # gazelle:prefix sigs.k8s.io/cluster-api # gazelle:proto disable_global +# gazelle:resolve go github.com/grpc-ecosystem/grpc-gateway/internal //vendor/github.com/grpc-ecosystem/grpc-gateway/internal:go_default_library gazelle( name = "gazelle", external = "vendored", @@ -19,3 +21,25 @@ workspace_binary( args = ["run --fast=false"], cmd = "@com_github_golangci_golangci-lint//cmd/golangci-lint", ) + +go_library( + name = "go_default_library", + srcs = ["main.go"], + importpath = "sigs.k8s.io/cluster-api", + visibility = ["//visibility:private"], + deps = [ + "//api/v1alpha2:go_default_library", + "//controllers:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/klog/klogr:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime:go_default_library", + ], +) + +go_binary( + name = "cluster-api-manager", + embed = [":go_default_library"], + visibility = ["//visibility:public"], +) diff --git a/vendor/sigs.k8s.io/cluster-api/CONTRIBUTING.md b/vendor/sigs.k8s.io/cluster-api/CONTRIBUTING.md index b0be9a45fb..f7ef7392bc 100644 --- a/vendor/sigs.k8s.io/cluster-api/CONTRIBUTING.md +++ b/vendor/sigs.k8s.io/cluster-api/CONTRIBUTING.md @@ -1,4 +1,27 @@ # Contributing Guidelines + + + + +- [Contributor License Agreements](#contributor-license-agreements) +- [Finding Things That Need Help](#finding-things-that-need-help) +- [Contributing a Patch](#contributing-a-patch) +- [Backporting a Patch](#backporting-a-patch) + - [Merge Approval](#merge-approval) + - [Google Doc Viewing Permissions](#google-doc-viewing-permissions) + - [Issue and Pull Request Management](#issue-and-pull-request-management) +- [Cloud Provider Developer Guide](#cloud-provider-developer-guide) + - [Overview](#overview) + - [Resources](#resources) + - [Boostrapping](#boostrapping) + - [A new Machine can be created in a declarative way](#a-new-machine-can-be-created-in-a-declarative-way) + - [Configurable Machine Setup](#configurable-machine-setup) + - [GCE Implementation](#gce-implementation) + - [A specific Machine can be deleted, freeing external resources associated with it.](#a-specific-machine-can-be-deleted-freeing-external-resources-associated-with-it) + - [A specific Machine can be upgraded or downgraded](#a-specific-machine-can-be-upgraded-or-downgraded) +- [Support Channels](#support-channels) + + Read the following guide if you're interested in contributing to cluster-api. @@ -22,6 +45,12 @@ If you're new to the project and want to help, but don't know where to start, we All changes must be code reviewed. Coding conventions and standards are explained in the official [developer docs](https://github.com/kubernetes/community/tree/master/contributors/devel). Expect reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs. +## Backporting a Patch + +Cluster API ships older versions through `release-X.X` branches, usually backports are reserved to critical bug-fixes. +Some release branches might ship with both Go modules and dep (e.g. `release-0.1`), users backporting patches should always make sure +that the vendored Go modules dependencies match the Gopkg.lock and Gopkg.toml ones by running `dep ensure` + ### Merge Approval Cluster API maintainers may add "LGTM" (Looks Good To Me) or an equivalent comment to indicate that a PR is acceptable. Any change requires at least one LGTM. No pull requests can be merged until at least one Cluster API maintainer signs off with an LGTM. @@ -30,6 +59,25 @@ Cluster API maintainers may add "LGTM" (Looks Good To Me) or an equivalent comme To gain viewing permissions to google docs in this project, please join either the [kubernetes-dev](https://groups.google.com/forum/#!forum/kubernetes-dev) or [kubernetes-sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) google group. +### Issue and Pull Request Management + +Anyone may comment on issues and submit reviews for pull requests. 
However, in +order to be assigned an issue or pull request, you must be a member of the +[Kubernetes SIGs](https://github.com/kubernetes-sigs) GitHub organization. + +If you are a Kubernetes GitHub organization member, you are eligible for +membership in the Kubernetes SIGs GitHub organization and can request +membership by [opening an issue](https://github.com/kubernetes/org/issues/new?template=membership.md&title=REQUEST%3A%20New%20membership%20for%20%3Cyour-GH-handle%3E) +against the kubernetes/org repo. + +However, if you are a member of any of the related Kubernetes GitHub +organizations but not of the Kubernetes org, you will need explicit sponsorship +for your membership request. You can read more about Kubernetes membership and +sponsorship [here](https://github.com/kubernetes/community/blob/master/community-membership.md). + +Cluster API maintainers can assign you an issue or pull request by leaving a +`/assign ` comment on the issue or pull request. + ## Cloud Provider Developer Guide ### Overview @@ -46,8 +94,8 @@ The machine controller should be able to act on a subset of machines that form a ### Resources * [Cluster Management API KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/0003-cluster-api.md) -* [Cluster type](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/cluster_types.go#L40) -* [Machine type](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/machine_types.go#L42) +* [Cluster type](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/deprecated/v1alpha1/cluster_types.go#L40) +* [Machine type](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/deprecated/v1alpha1/machine_types.go#L42) ### Boostrapping diff --git a/vendor/sigs.k8s.io/cluster-api/Dockerfile b/vendor/sigs.k8s.io/cluster-api/Dockerfile index 086a6898b9..10b1a07d4d 100644 --- a/vendor/sigs.k8s.io/cluster-api/Dockerfile +++ b/vendor/sigs.k8s.io/cluster-api/Dockerfile @@ -13,20 +13,25 @@ # limitations under the License. # Build the manager binary -FROM golang:1.12.6 as builder +FROM golang:1.12.7 as builder + +ARG ARCH # Copy in the go src WORKDIR ${GOPATH}/src/sigs.k8s.io/cluster-api -COPY pkg/ pkg/ -COPY cmd/ cmd/ +COPY pkg/ pkg/ +COPY cmd/ cmd/ +COPY api/ api/ +COPY controllers/ controllers/ COPY vendor/ vendor/ COPY go.mod go.mod COPY go.sum go.sum +COPY main.go main.go # Build RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} GO111MODULE=on GOFLAGS="-mod=vendor" \ go build -a -ldflags '-extldflags "-static"' \ - -o manager sigs.k8s.io/cluster-api/cmd/manager + -o manager . 
# Copy the controller-manager into a thin image FROM gcr.io/distroless/static:latest diff --git a/vendor/sigs.k8s.io/cluster-api/Gopkg.toml b/vendor/sigs.k8s.io/cluster-api/Gopkg.toml deleted file mode 100644 index aca24451b6..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/Gopkg.toml +++ /dev/null @@ -1,41 +0,0 @@ -required = [ - "github.com/emicklei/go-restful", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", # for development against gcp - "k8s.io/code-generator/cmd/deepcopy-gen", # for go generate - "k8s.io/code-generator/cmd/client-gen", # for go generate - "k8s.io/code-generator/cmd/informer-gen", # for go generate - "k8s.io/code-generator/cmd/lister-gen", # for go generate - "sigs.k8s.io/controller-tools/cmd/controller-gen", # for crd/rbac generation - "sigs.k8s.io/controller-runtime/pkg/client/config", - "sigs.k8s.io/controller-runtime/pkg/controller", - "sigs.k8s.io/controller-runtime/pkg/handler", - "sigs.k8s.io/controller-runtime/pkg/manager", - "sigs.k8s.io/controller-runtime/pkg/runtime/signals", - "sigs.k8s.io/controller-runtime/pkg/source", - "sigs.k8s.io/testing_frameworks/integration", # for integration testing - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - ] - -[prune] - go-tests = true - unused-packages = true - -# STANZAS BELOW ARE GENERATED AND MAY BE WRITTEN - DO NOT MODIFY BELOW THIS LINE. - -[[constraint]] - name = "sigs.k8s.io/controller-runtime" - version = "v0.1.12" - -[[constraint]] - name = "sigs.k8s.io/controller-tools" - version = "v0.1.11" - -[[constraint]] - name = "k8s.io/code-generator" - branch = "release-1.13" - -# For dependency below: Refer to issue https://github.com/golang/dep/issues/1799 -[[override]] -name = "gopkg.in/fsnotify.v1" -source = "https://github.com/fsnotify/fsnotify.git" -version = "v1.4.7" diff --git a/vendor/sigs.k8s.io/cluster-api/Makefile b/vendor/sigs.k8s.io/cluster-api/Makefile index 68969c6607..a1ee1ec0b5 100644 --- a/vendor/sigs.k8s.io/cluster-api/Makefile +++ b/vendor/sigs.k8s.io/cluster-api/Makefile @@ -21,60 +21,65 @@ export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT ?=60s export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT ?=60s +# This option is for running docker manifest command +export DOCKER_CLI_EXPERIMENTAL := enabled + # Image URL to use all building/pushing image targets -export CONTROLLER_IMG ?= gcr.io/k8s-staging-cluster-api/cluster-api-controller:v0.1.9 -export EXAMPLE_PROVIDER_IMG ?= gcr.io/k8s-cluster-api/example-provider-controller:latest +REGISTRY ?= gcr.io/$(shell gcloud config get-value project) +CONTROLLER_IMG ?= $(REGISTRY)/cluster-api-controller +EXAMPLE_PROVIDER_IMG ?= $(REGISTRY)/example-provider-controller +TAG ?= dev + +ARCH?=amd64 +ALL_ARCH = amd64 arm arm64 ppc64le s390x +GOOS?=linux all: test manager clusterctl help: ## Display this help @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -.PHONY: gazelle -gazelle: ## Run Bazel Gazelle - (which bazel && ./hack/update-bazel.sh) || true - -.PHONY: vendor -vendor: ## Runs go mod to ensure proper vendoring. 
- ./hack/update-vendor.sh - $(MAKE) gazelle - -bin/%-gen: ./vendor/k8s.io/code-generator/cmd/%-gen ## Build code-generator binaries - go build -o $@ ./$< +## -------------------------------------- +## Testing +## -------------------------------------- .PHONY: test -test: gazelle verify generate fmt vet manifests ## Run tests +test: generate lint ## Run tests + $(MAKE) test-go + +.PHONY: test-go +test-go: ## Run tests go test -v -tags=integration ./pkg/... ./cmd/... +## -------------------------------------- +## Binaries +## -------------------------------------- + .PHONY: manager -manager: generate fmt vet ## Build manager binary +manager: lint-full ## Build manager binary go build -o bin/manager sigs.k8s.io/cluster-api/cmd/manager +.PHONY: docker-build-manager +docker-build-manager: lint-full ## Build manager binary in docker + docker run --rm \ + -v "${PWD}:/go/src/sigs.k8s.io/cluster-api" \ + -v "${PWD}/bin:/go/bin" \ + -w "/go/src/sigs.k8s.io/cluster-api" \ + -e CGO_ENABLED=0 -e GOOS=${GOOS} -e GOARCH=${ARCH} -e GO111MODULE=on -e GOFLAGS="-mod=vendor" \ + golang:1.12.6 \ + go build -a -ldflags '-extldflags "-static"' -o /go/bin/manager sigs.k8s.io/cluster-api/cmd/manager + .PHONY: clusterctl -clusterctl: generate fmt vet ## Build clusterctl binary +clusterctl: lint-full ## Build clusterctl binary go build -o bin/clusterctl sigs.k8s.io/cluster-api/cmd/clusterctl .PHONY: run -run: generate fmt vet ## Run against the configured Kubernetes cluster in ~/.kube/config +run: lint ## Run against the configured Kubernetes cluster in ~/.kube/config go run ./cmd/manager/main.go -.PHONY: deploy -deploy: manifests ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config - kustomize build config/default | kubectl apply -f - - -.PHONY: manifests -manifests: ## Generate manifests e.g. CRD, RBAC etc. - go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go all - cp -f ./config/rbac/manager*.yaml ./config/ci/rbac/ - cp -f ./config/manager/manager*.yaml ./config/ci/manager/ - -.PHONY: fmt -fmt: ## Run go fmt against code - go fmt ./pkg/... ./cmd/... - -.PHONY: vet -vet: ## Run go vet against code - go vet ./pkg/... ./cmd/... +## -------------------------------------- +## Linting +## -------------------------------------- .PHONY: lint lint: ## Lint codebase @@ -83,53 +88,136 @@ lint: ## Lint codebase lint-full: ## Run slower linters to detect possible issues bazel run //:lint-full $(BAZEL_ARGS) +## -------------------------------------- +## Generate / Manifests +## -------------------------------------- + .PHONY: generate -generate: clientset ## Generate code - go generate ./pkg/... ./cmd/... 
+generate: ## Generate code + $(MAKE) generate-manifests + $(MAKE) generate-deepcopy $(MAKE) gazelle -.PHONY: clientset -clientset: bin/client-gen bin/lister-gen bin/informer-gen ## Generate a typed clientset - rm -rf pkg/client - bin/client-gen --clientset-name clientset --input-base sigs.k8s.io/cluster-api/pkg/apis \ - --input cluster/v1alpha1 --output-package sigs.k8s.io/cluster-api/pkg/client/clientset_generated \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt - bin/lister-gen --input-dirs sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 \ - --output-package sigs.k8s.io/cluster-api/pkg/client/listers_generated \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt - bin/informer-gen --input-dirs sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1 \ - --versioned-clientset-package sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset \ - --listers-package sigs.k8s.io/cluster-api/pkg/client/listers_generated \ - --output-package sigs.k8s.io/cluster-api/pkg/client/informers_generated \ - --go-header-file=./hack/boilerplate/boilerplate.generatego.txt +.PHONY: generate-deepcopy +generate-deepcopy: ## Runs controller-gen to generate deepcopy files + go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go \ + object:headerFile=./hack/boilerplate/boilerplate.generatego.txt \ + paths=./api/... + +.PHONY: generate-manifests +generate-manifests: ## Generate manifests e.g. CRD, RBAC etc. + go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go \ + paths=./api/... \ + paths=./pkg/controller/... \ + crd:trivialVersions=true \ + rbac:roleName=manager-role \ + output:crd:dir=./config/crd/bases + ## Copy files in CI folders. + cp -f ./config/rbac/role*.yaml ./config/ci/rbac/ + cp -f ./config/manager/manager*.yaml ./config/ci/manager/ + +.PHONY: gazelle +gazelle: ## Run Bazel Gazelle + (which bazel && ./hack/update-bazel.sh) || true + +.PHONY: vendor +vendor: ## Runs go mod to ensure proper vendoring. + ./hack/update-vendor.sh $(MAKE) gazelle -.PHONY: clean -clean: ## Remove all generated files - rm -f bazel-* +## -------------------------------------- +## Docker +## -------------------------------------- .PHONY: docker-build -docker-build: generate fmt vet manifests ## Build the docker image for controller-manager - docker build --pull . -t ${CONTROLLER_IMG} +docker-build: generate lint-full ## Build the docker image for controller-manager + docker build --pull --build-arg ARCH=$(ARCH) . 
-t $(CONTROLLER_IMG)-$(ARCH):$(TAG) @echo "updating kustomize image patch file for manager resource" - hack/sed.sh -i.tmp -e 's@image: .*@image: '"${CONTROLLER_IMG}"'@' ./config/default/manager_image_patch.yaml + hack/sed.sh -i.tmp -e 's@image: .*@image: '"${CONTROLLER_IMG}-$(ARCH):$(TAG)"'@' ./config/default/manager_image_patch.yaml .PHONY: docker-push docker-push: docker-build ## Push the docker image - docker push "$(CONTROLLER_IMG)" + docker push $(CONTROLLER_IMG)-$(ARCH):$(TAG) + +.PHONY: all-docker-build +all-docker-build: $(addprefix sub-docker-build-,$(ALL_ARCH)) + @echo "updating kustomize image patch file for manager resource" + hack/sed.sh -i.tmp -e 's@image: .*@image: '"$(CONTROLLER_IMG):$(TAG)"'@' ./config/default/manager_image_patch.yaml + +sub-docker-build-%: + $(MAKE) ARCH=$* docker-build + +.PHONY:all-push ## Push all the architecture docker images and fat manifest docker image +all-push: all-docker-push docker-push-manifest + +.PHONY:all-docker-push ## Push all the architecture docker images +all-docker-push: $(addprefix sub-docker-push-,$(ALL_ARCH)) + +sub-docker-push-%: + $(MAKE) ARCH=$* docker-push .PHONY: docker-build-ci -docker-build-ci: generate fmt vet manifests ## Build the docker image for example provider - docker build --pull . -f ./pkg/provider/example/container/Dockerfile -t ${EXAMPLE_PROVIDER_IMG} +docker-build-ci: generate lint-full ## Build the docker image for example provider + docker build --pull --build-arg ARCH=$(ARCH) . -f ./pkg/provider/example/container/Dockerfile -t $(EXAMPLE_PROVIDER_IMG)-$(ARCH):$(TAG) @echo "updating kustomize image patch file for ci" - hack/sed.sh -i.tmp -e 's@image: .*@image: '"${EXAMPLE_PROVIDER_IMG}"'@' ./config/ci/manager_image_patch.yaml + hack/sed.sh -i.tmp -e 's@image: .*@image: '"${EXAMPLE_PROVIDER_IMG}-$(ARCH):$(TAG)"'@' ./config/ci/manager_image_patch.yaml .PHONY: docker-push-ci docker-push-ci: docker-build-ci ## Build the docker image for ci - docker push "$(EXAMPLE_PROVIDER_IMG)" + docker push "$(EXAMPLE_PROVIDER_IMG)-$(ARCH):$(TAG)" + +.PHONY: docker-push-manifest +docker-push-manifest: ## Push the fat manifest docker image. 
TODO: Update bazel build to push manifest once https://github.com/bazelbuild/rules_docker/issues/300 get merged + ## Minimum docker version 18.06.0 is required for creating and pushing manifest images + docker manifest create --amend $(CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CONTROLLER_IMG)\-&:$(TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${CONTROLLER_IMG}:${TAG} ${CONTROLLER_IMG}-$${arch}:${TAG}; done + docker manifest push --purge ${CONTROLLER_IMG}:${TAG} + +## -------------------------------------- +## Cleanup / Verification +## -------------------------------------- + +.PHONY: clean +clean: ## Remove all generated files + $(MAKE) clean-bazel + $(MAKE) clean-bin + $(MAKE) clean-book + +.PHONY: clean-bazel +clean-bazel: ## Remove all generated bazel symlinks + bazel clean + +.PHONY: clean-bin +clean-bin: ## Remove all generated binaries + rm -rf bin + +.PHONY: clean-clientset +clean-clientset: ## Remove all generated clientset files + rm -rf pkg/client .PHONY: verify verify: ./hack/verify-boilerplate.sh - ./hack/verify_clientset.sh + ./hack/verify-clientset.sh ./hack/verify-bazel.sh + ./hack/verify-doctoc.sh + +.PHONY: clean-book +clean-book: ## Remove all generated GitBook files + rm -rf ./docs/book/_book + +## -------------------------------------- +## Others / Utilities +## -------------------------------------- + +.PHONY: diagrams +diagrams: ## Build proposal diagrams + $(MAKE) -C docs/proposals/images + +.PHONY: book +book: ## Build the GitBook + ./hack/build-gitbook.sh + +.PHONY: serve-book +serve-book: ## Build and serve the GitBook with live-reloading enabled + (cd ./docs/book && gitbook serve --live --open) diff --git a/vendor/sigs.k8s.io/cluster-api/PROJECT b/vendor/sigs.k8s.io/cluster-api/PROJECT index 46134a0621..f9cb88368f 100644 --- a/vendor/sigs.k8s.io/cluster-api/PROJECT +++ b/vendor/sigs.k8s.io/cluster-api/PROJECT @@ -1,3 +1,16 @@ -version: "1" -domain: k8s.io +version: "2" +domain: x-k8s.io repo: sigs.k8s.io/cluster-api +resources: +- group: cluster + version: v1alpha2 + kind: Cluster +- group: cluster + version: v1alpha2 + kind: Machine +- group: cluster + version: v1alpha2 + kind: MachineSet +- group: cluster + version: v1alpha2 + kind: MachineDeployment diff --git a/vendor/sigs.k8s.io/cluster-api/README.md b/vendor/sigs.k8s.io/cluster-api/README.md index 90d3a7a899..0ea4cf179a 100644 --- a/vendor/sigs.k8s.io/cluster-api/README.md +++ b/vendor/sigs.k8s.io/cluster-api/README.md @@ -1,4 +1,19 @@ # Cluster API + + + + +- [What is the Cluster API?](#what-is-the-cluster-api) +- [Getting Started](#getting-started) + - [Resources](#resources) + - [Prerequisites](#prerequisites) + - [Using `clusterctl` to create a cluster](#using-clusterctl-to-create-a-cluster) + - [How does Cluster API compare to Kubernetes Cloud Providers?](#how-does-cluster-api-compare-to-kubernetes-cloud-providers) +- [Get involved!](#get-involved) +- [Provider Implementations](#provider-implementations) +- [API Adoption](#api-adoption) + + ## What is the Cluster API? The Cluster API is a Kubernetes project to bring declarative, Kubernetes-style @@ -10,6 +25,19 @@ feedback on the API types themselves. All of the code here is to experiment with the API and demo its abilities, in order to drive more technical feedback to the API design. Because of this, all of the prototype code is rapidly changing. 
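
The rewritten PROJECT file above declares four cluster.x-k8s.io/v1alpha2 resources (Cluster, Machine, MachineSet, MachineDeployment) under the kubebuilder v2 layout. As a rough, hypothetical sketch of how one of these resources is paired with a reconciler under that layout, assuming the controller-runtime builder API vendored by this patch (the type name, fields, and reconcile logic below are illustrative assumptions, not code from this repository):

```go
package controllers

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

// ClusterReconciler is a minimal, illustrative reconciler for the
// cluster.x-k8s.io/v1alpha2 Cluster resource declared in PROJECT.
type ClusterReconciler struct {
	Client client.Client
}

// Reconcile fetches the Cluster named in the request and would apply the
// desired state; here it only demonstrates the fetch-and-return shape.
func (r *ClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()

	var cluster clusterv1.Cluster
	if err := r.Client.Get(ctx, req.NamespacedName, &cluster); err != nil {
		// Not-found errors are swallowed so deleted objects are not requeued.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Real reconciliation logic would go here.
	return ctrl.Result{}, nil
}

// SetupWithManager registers the reconciler with a controller-runtime manager.
func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&clusterv1.Cluster{}).
		Complete(r)
}
```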
+## Getting Started + +### Resources + +* GitBook: [cluster-api.sigs.k8s.io](https://cluster-api.sigs.k8s.io) + +### Prerequisites +* `kubectl` is required, see [here](http://kubernetes.io/docs/user-guide/prereqs/). +* `clusterctl` is a SIG-cluster-lifecycle sponsored tool to manage Cluster API clusters. See [here](cmd/clusterctl) + +### Using `clusterctl` to create a cluster +* Doc [here](./docs/how-to-use-clusterctl.md) + ![Cluster API Architecture](./docs/book/common_code/architecture.svg "Cluster API Architecture") Learn more about the project's [scope, objectives, goals and requirements](./docs/scope-and-objectives.md), [feature proposals](./docs/proposals/) and [reference use cases](./docs/staging-use-cases.md). @@ -54,6 +82,7 @@ are also sponsored by SIG-cluster-lifecycle: * GCP, https://github.com/kubernetes-sigs/cluster-api-provider-gcp * IBM Cloud, https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud * OpenStack, https://github.com/kubernetes-sigs/cluster-api-provider-openstack + * Packet, https://github.com/packethost/cluster-api-provider-packet * Talos, https://github.com/talos-systems/cluster-api-provider-talos * Tencent Cloud, https://github.com/TencentCloud/cluster-api-provider-tencent * vSphere, https://github.com/kubernetes-sigs/cluster-api-provider-vsphere @@ -74,15 +103,7 @@ Following are the implementations managed by third-parties adopting the standard - The _master_ branch is where development happens, this might include breaking changes. - The _release-X_ branches contain stable, backward compatible code. A new _release-X_ branch is created at every major (X) release. -## Getting Started - -### Resources - -* GitBook: [kubernetes-sigs.github.io/cluster-api](https://kubernetes-sigs.github.io/cluster-api) -### Prerequisites -* `kubectl` is required, see [here](http://kubernetes.io/docs/user-guide/prereqs/). -* `clusterctl` is a SIG-cluster-lifecycle sponsored tool to manage Cluster API clusters. 
See [here](cmd/clusterctl) [notes]: https://docs.google.com/document/d/1Ys-DOR5UsgbMEeciuG0HOgDQc8kZsaWIWJeKJ1-UfbY/edit [recordings]: https://www.youtube.com/playlist?list=PL69nYSiGNLP29D0nYgAGWt1ZFqS9Z7lw4 diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/BUILD.bazel new file mode 100644 index 0000000000..d03e38e1af --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "cluster_phase_types.go", + "cluster_types.go", + "common_types.go", + "defaults.go", + "groupversion_info.go", + "machine_phase_types.go", + "machine_types.go", + "machinedeployment_types.go", + "machineset_types.go", + "zz_generated.deepcopy.go", + ], + importpath = "sigs.k8s.io/cluster-api/api/v1alpha2", + visibility = ["//visibility:public"], + deps = [ + "//pkg/errors:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/scheme:go_default_library", + ], +) diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_phase_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_phase_types.go new file mode 100644 index 0000000000..044963b0b9 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_phase_types.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +// ClusterPhase is a string representation of a Cluster Phase. +// +// This type is a high-level indicator of the status of the Cluster as it is provisioned, +// from the API user’s perspective. +// +// The value should not be interpreted by any software components as a reliable indication +// of the actual state of the Cluster, and controllers should not use the Cluster Phase field +// value when making decisions about what action to take. +// +// Controllers should always look at the actual state of the Cluster’s fields to make those decisions. +type ClusterPhase string + +var ( + // ClusterPhasePending is the first state a Cluster is assigned by + // Cluster API Cluster controller after being created. + ClusterPhasePending = ClusterPhase("pending") + + // ClusterPhaseProvisioning is the state when the Cluster has a provider infrastructure + // object associated and can start provisioning. 
+ ClusterPhaseProvisioning = ClusterPhase("provisioning") + + // ClusterPhaseProvisioned is the state when its + // infrastructure has been created and configured. + ClusterPhaseProvisioned = ClusterPhase("provisioned") + + // ClusterPhaseDeleting is the Cluster state when a delete + // request has been sent to the API Server, + // but its infrastructure has not yet been fully deleted. + ClusterPhaseDeleting = ClusterPhase("deleting") + + // ClusterPhaseFailed is the Cluster state when the system + // might require user intervention. + ClusterPhaseFailed = ClusterPhase("failed") + + // ClusterPhaseUnknown is returned if the Cluster state cannot be determined. + ClusterPhaseUnknown = ClusterPhase("") +) diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_types.go new file mode 100644 index 0000000000..1060b0f88c --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/cluster_types.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" +) + +const ( + ClusterFinalizer = "cluster.cluster.x-k8s.io" + ClusterAnnotationControlPlaneReady = "cluster.x-k8s.io/control-plane-ready" +) + +/// [ClusterSpec] +// ClusterSpec defines the desired state of Cluster +type ClusterSpec struct { + // Cluster network configuration + // +optional + ClusterNetwork *ClusterNetworkingConfig `json:"clusterNetwork,omitempty"` + + // InfrastructureRef is a reference to a provider-specific resource that holds the details + // for provisioning infrastructure for a cluster in said provider. + // +optional + InfrastructureRef *corev1.ObjectReference `json:"infrastructureRef,omitempty"` +} + +/// [ClusterSpec] + +/// [ClusterNetworkingConfig] +// ClusterNetworkingConfig specifies the different networking +// parameters for a cluster. +type ClusterNetworkingConfig struct { + // The network ranges from which service VIPs are allocated. + Services NetworkRanges `json:"services"` + + // The network ranges from which Pod networks are allocated. + Pods NetworkRanges `json:"pods"` + + // Domain name for services. + ServiceDomain string `json:"serviceDomain"` +} + +/// [ClusterNetworkingConfig] + +/// [NetworkRanges] +// NetworkRanges represents ranges of network addresses. +type NetworkRanges struct { + CIDRBlocks []string `json:"cidrBlocks"` +} + +/// [NetworkRanges] + +/// [ClusterStatus] +// ClusterStatus defines the observed state of Cluster +type ClusterStatus struct { + // APIEndpoints represents the endpoints to communicate with the control plane. + // +optional + APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"` + + // ErrorReason indicates that there is a problem reconciling the + // state, and will be set to a token value suitable for + // programmatic interpretation. 
+ // +optional + ErrorReason *capierrors.ClusterStatusError `json:"errorReason,omitempty"` + + // ErrorMessage indicates that there is a problem reconciling the + // state, and will be set to a descriptive error message. + // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` + + // Phase represents the current phase of cluster actuation. + // E.g. Pending, Running, Terminating, Failed etc. + // +optional + Phase string `json:"phase,omitempty"` + + // InfrastructureReady is the state of the infrastructure provider. + // +optional + InfrastructureReady bool `json:"infrastructureReady"` +} + +// SetTypedPhase sets the Phase field to the string representation of ClusterPhase. +func (c *ClusterStatus) SetTypedPhase(p ClusterPhase) { + c.Phase = string(p) +} + +// GetTypedPhase attempts to parse the Phase field and return +// the typed ClusterPhase representation as described in `machine_phase_types.go`. +func (c *ClusterStatus) GetTypedPhase() ClusterPhase { + switch phase := ClusterPhase(c.Phase); phase { + case + ClusterPhasePending, + ClusterPhaseProvisioning, + ClusterPhaseProvisioned, + ClusterPhaseDeleting, + ClusterPhaseFailed: + return phase + default: + return ClusterPhaseUnknown + } +} + +/// [ClusterStatus] + +/// [APIEndpoint] +// APIEndpoint represents a reachable Kubernetes API endpoint. +type APIEndpoint struct { + // The hostname on which the API server is serving. + Host string `json:"host"` + + // The port on which the API server is serving. + Port int `json:"port"` +} + +/// [APIEndpoint] + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=clusters,shortName=cl,scope=Namespaced +// +kubebuilder:storageversion +// +kubebuilder:subresource:status + +// Cluster is the Schema for the clusters API +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSpec `json:"spec,omitempty"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterList contains a list of Cluster +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/common_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/common_types.go new file mode 100644 index 0000000000..919077c158 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/common_types.go @@ -0,0 +1,128 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MachineAddressType describes a valid MachineAddress type. 
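
The SetTypedPhase/GetTypedPhase helpers defined above keep the free-form `Phase` string in ClusterStatus and the typed ClusterPhase constants in sync. A small hypothetical usage sketch, separate from the vendored file itself, of how calling code might drive them (the surrounding program is an assumption for illustration):

```go
package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

func main() {
	status := &clusterv1.ClusterStatus{}

	// SetTypedPhase stores the typed constant as the plain string "provisioning".
	status.SetTypedPhase(clusterv1.ClusterPhaseProvisioning)

	// GetTypedPhase parses Phase back into a ClusterPhase; strings it does not
	// recognise (e.g. hand-edited values) come back as ClusterPhaseUnknown.
	switch status.GetTypedPhase() {
	case clusterv1.ClusterPhaseProvisioning, clusterv1.ClusterPhaseProvisioned:
		fmt.Println("cluster is being brought up:", status.Phase)
	case clusterv1.ClusterPhaseFailed:
		fmt.Println("cluster needs attention:", status.Phase)
	default:
		fmt.Println("phase not recognised; inspect the full status instead")
	}
}
```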
+type MachineAddressType string + +const ( + MachineHostName MachineAddressType = "Hostname" + MachineExternalIP MachineAddressType = "ExternalIP" + MachineInternalIP MachineAddressType = "InternalIP" + MachineExternalDNS MachineAddressType = "ExternalDNS" + MachineInternalDNS MachineAddressType = "InternalDNS" +) + +// MachineAddress contains information for the node's address. +type MachineAddress struct { + // Machine address type, one of Hostname, ExternalIP or InternalIP. + Type MachineAddressType `json:"type"` + + // The machine address. + Address string `json:"address"` +} + +// MachineAddresses is a slice of MachineAddress items to be used by infrastructure providers. +type MachineAddresses []MachineAddress + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. This is a copy of customizable fields from metav1.ObjectMeta. +// +// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, +// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases +// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies +// the API and some issues that can impact user experience. +// +// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) +// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, +// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`. +// The investigation showed that `controller-tools@v2` behaves differently than its previous version +// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package. +// +// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` +// had validation properties, including for `creationTimestamp` (metav1.Time). +// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` +// which breaks validation because the field isn't marked as nullable. +// +// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded +// types. When that happens, this hack should be revisited. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. 
+ // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` +} diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/defaults.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/defaults.go new file mode 100644 index 0000000000..15c40afdaa --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/defaults.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PopulateDefaultsMachineDeployment fills in default field values +// Currently it is called after reading objects, but it could be called in an admission webhook also +func PopulateDefaultsMachineDeployment(d *MachineDeployment) { + if d.Spec.Replicas == nil { + d.Spec.Replicas = new(int32) + *d.Spec.Replicas = 1 + } + + if d.Spec.MinReadySeconds == nil { + d.Spec.MinReadySeconds = new(int32) + *d.Spec.MinReadySeconds = 0 + } + + if d.Spec.RevisionHistoryLimit == nil { + d.Spec.RevisionHistoryLimit = new(int32) + *d.Spec.RevisionHistoryLimit = 1 + } + + if d.Spec.ProgressDeadlineSeconds == nil { + d.Spec.ProgressDeadlineSeconds = new(int32) + *d.Spec.ProgressDeadlineSeconds = 600 + } + + if d.Spec.Strategy == nil { + d.Spec.Strategy = &MachineDeploymentStrategy{} + } + + if d.Spec.Strategy.Type == "" { + d.Spec.Strategy.Type = RollingUpdateMachineDeploymentStrategyType + } + + // Default RollingUpdate strategy only if strategy type is RollingUpdate. + if d.Spec.Strategy.Type == RollingUpdateMachineDeploymentStrategyType { + if d.Spec.Strategy.RollingUpdate == nil { + d.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} + } + if d.Spec.Strategy.RollingUpdate.MaxSurge == nil { + ios1 := intstr.FromInt(1) + d.Spec.Strategy.RollingUpdate.MaxSurge = &ios1 + } + if d.Spec.Strategy.RollingUpdate.MaxUnavailable == nil { + ios0 := intstr.FromInt(0) + d.Spec.Strategy.RollingUpdate.MaxUnavailable = &ios0 + } + } + + if len(d.Namespace) == 0 { + d.Namespace = metav1.NamespaceDefault + } +} diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/groupversion_info.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/groupversion_info.go new file mode 100644 index 0000000000..f64f9b4c22 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha2 contains API Schema definitions for the cluster v1alpha2 API group +// +kubebuilder:object:generate=true +// +groupName=cluster.x-k8s.io +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "cluster.x-k8s.io", Version: "v1alpha2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_phase_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_phase_types.go new file mode 100644 index 0000000000..4faef32215 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_phase_types.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +// MachinePhase is a string representation of a Machine Phase. +// +// This type is a high-level indicator of the status of the Machine as it is provisioned, +// from the API user’s perspective. +// +// The value should not be interpreted by any software components as a reliable indication +// of the actual state of the Machine, and controllers should not use the Machine Phase field +// value when making decisions about what action to take. +// +// Controllers should always look at the actual state of the Machine’s fields to make those decisions. +type MachinePhase string + +var ( + // MachinePhasePending is the first state a Machine is assigned by + // Cluster API Machine controller after being created. + MachinePhasePending = MachinePhase("pending") + + // MachinePhaseProvisioning is the state when the + // Machine infrastructure is being created. + MachinePhaseProvisioning = MachinePhase("provisioning") + + // MachinePhaseProvisioned is the state when its + // infrastructure has been created and configured. + MachinePhaseProvisioned = MachinePhase("provisioned") + + // MachinePhaseRunning is the Machine state when it has + // become a Kubernetes Node in a Ready state. + MachinePhaseRunning = MachinePhase("running") + + // MachinePhaseDeleting is the Machine state when a delete + // request has been sent to the API Server, + // but its infrastructure has not yet been fully deleted. + MachinePhaseDeleting = MachinePhase("deleting") + + // MachinePhaseDeleted is the Machine state when the object + // and the related infrastructure is deleted and + // ready to be garbage collected by the API Server. + MachinePhaseDeleted = MachinePhase("deleted") + + // MachinePhaseFailed is the Machine state when the system + // might require user intervention. + MachinePhaseFailed = MachinePhase("failed") + + // MachinePhaseUnknown is returned if the Machine state cannot be determined. + MachinePhaseUnknown = MachinePhase("") +) diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_types.go new file mode 100644 index 0000000000..d76bbda337 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machine_types.go @@ -0,0 +1,223 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" +) + +const ( + // MachineFinalizer is set on PrepareForCreate callback. + MachineFinalizer = "machine.cluster.x-k8s.io" + + // MachineClusterLabelName is the label set on machines linked to a cluster. + MachineClusterLabelName = "cluster.x-k8s.io/cluster-name" + + // MachineControlPlaneLabelName is the label set on machines part of a control plane. + MachineControlPlaneLabelName = "cluster.x-k8s.io/control-plane" +) + +/// [MachineSpec] +// MachineSpec defines the desired state of Machine +type MachineSpec struct { + // ObjectMeta will autopopulate the Node created. Use this to + // indicate what labels, annotations, name prefix, etc., should be used + // when creating the Node. + // +optional + ObjectMeta `json:"metadata,omitempty"` + + // Bootstrap is a reference to a local struct which encapsulates + // fields to configure the Machine’s bootstrapping mechanism. + Bootstrap Bootstrap `json:"bootstrap"` + + // InfrastructureRef is a required reference to a custom resource + // offered by an infrastructure provider. + InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"` + + // Version defines the desired Kubernetes version. + // This field is meant to be optionally used by bootstrap providers. + // +optional + Version *string `json:"version,omitempty"` + + // ProviderID is the identification ID of the machine provided by the provider. + // This field must match the provider ID as seen on the node object corresponding to this machine. + // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + // with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + // machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + // generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + // able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + // and then a comparison is done to find out unregistered machines and are marked for delete. + // This field will be set by the actuators and consumed by higher level entities like autoscaler that will + // be interfacing with cluster-api as generic provider. + // +optional + ProviderID *string `json:"providerID,omitempty"` +} + +/// [MachineSpec] + +/// [MachineStatus] +// MachineStatus defines the observed state of Machine +type MachineStatus struct { + // NodeRef will point to the corresponding Node if it exists. + // +optional + NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` + + // LastUpdated identifies when this status was last observed. + // +optional + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // Version specifies the current version of Kubernetes running + // on the corresponding Node. This is meant to be a means of bubbling + // up status from the Node to the Machine. 
+ // It is entirely optional, but useful for end-user UX if it’s present. + // +optional + Version *string `json:"version,omitempty"` + + // ErrorReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorReason *capierrors.MachineStatusError `json:"errorReason,omitempty"` + + // ErrorMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` + + // Addresses is a list of addresses assigned to the machine. + // This field is copied from the infrastructure provider reference. + // +optional + Addresses MachineAddresses `json:"addresses,omitempty"` + + // Phase represents the current phase of machine actuation. + // E.g. Pending, Running, Terminating, Failed etc. + // +optional + Phase string `json:"phase,omitempty"` + + // BootstrapReady is the state of the bootstrap provider. + // +optional + BootstrapReady bool `json:"bootstrapReady"` + + // InfrastructureReady is the state of the infrastructure provider. + // +optional + InfrastructureReady bool `json:"infrastructureReady"` +} + +// SetTypedPhase sets the Phase field to the string representation of MachinePhase. +func (m *MachineStatus) SetTypedPhase(p MachinePhase) { + m.Phase = string(p) +} + +// GetTypedPhase attempts to parse the Phase field and return +// the typed MachinePhase representation as described in `machine_phase_types.go`. 
+func (m *MachineStatus) GetTypedPhase() MachinePhase { + switch phase := MachinePhase(m.Phase); phase { + case + MachinePhasePending, + MachinePhaseProvisioning, + MachinePhaseProvisioned, + MachinePhaseRunning, + MachinePhaseDeleting, + MachinePhaseDeleted, + MachinePhaseFailed: + return phase + default: + return MachinePhaseUnknown + } +} + +/// [MachineStatus] + +/// [Bootstrap] +// Bootstrap capsulates fields to configure the Machine’s bootstrapping mechanism. +type Bootstrap struct { + // ConfigRef is a reference to a bootstrap provider-specific resource + // that holds configuration details. The reference is optional to + // allow users/operators to specify Bootstrap.Data without + // the need of a controller. + // +optional + ConfigRef *corev1.ObjectReference `json:"configRef,omitempty"` + + // Data contains the bootstrap data, such as cloud-init details scripts. + // If nil, the Machine should remain in the Pending state. + // +optional + Data *string `json:"data,omitempty"` +} + +/// [Bootstrap] + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machines,shortName=ma,scope=Namespaced +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Machine status such as Terminating/Pending/Running/Failed etc" +// +kubebuilder:printcolumn:name="NodeName",type="string",JSONPath=".status.nodeRef.name",description="Node name associated with this machine",priority=1 + +/// [Machine] +// Machine is the Schema for the machines API +type Machine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSpec `json:"spec,omitempty"` + Status MachineStatus `json:"status,omitempty"` +} + +/// [Machine] + +// +kubebuilder:object:root=true + +// MachineList contains a list of Machine +type MachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Machine `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Machine{}, &MachineList{}) +} diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machinedeployment_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machinedeployment_types.go new file mode 100644 index 0000000000..e520519e24 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machinedeployment_types.go @@ -0,0 +1,200 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +type MachineDeploymentStrategyType string + +const ( + // Replace the old MachineSet by new one using rolling update + // i.e. gradually scale down the old MachineSet and scale up the new one. 
+ RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" +) + +/// [MachineDeploymentSpec] +// MachineDeploymentSpec defines the desired state of MachineDeployment +type MachineDeploymentSpec struct { + // Number of desired machines. Defaults to 1. + // This is a pointer to distinguish between explicit zero and not specified. + Replicas *int32 `json:"replicas,omitempty"` + + // Label selector for machines. Existing MachineSets whose machines are + // selected by this will be the ones affected by this deployment. + // It must match the machine template's labels. + Selector metav1.LabelSelector `json:"selector"` + + // Template describes the machines that will be created. + Template MachineTemplateSpec `json:"template"` + + // The deployment strategy to use to replace existing machines with + // new ones. + // +optional + Strategy *MachineDeploymentStrategy `json:"strategy,omitempty"` + + // Minimum number of seconds for which a newly created machine should + // be ready. + // Defaults to 0 (machine will be considered available as soon as it + // is ready) + // +optional + MinReadySeconds *int32 `json:"minReadySeconds,omitempty"` + + // The number of old MachineSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 1. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"` +} + +/// [MachineDeploymentSpec] + +/// [MachineDeploymentStrategy] +// MachineDeploymentStrategy describes how to replace existing machines +// with new ones. +type MachineDeploymentStrategy struct { + // Type of deployment. Currently the only supported strategy is + // "RollingUpdate". + // Default is RollingUpdate. + // +optional + Type MachineDeploymentStrategyType `json:"type,omitempty"` + + // Rolling update config params. Present only if + // MachineDeploymentStrategyType = RollingUpdate. + // +optional + RollingUpdate *MachineRollingUpdateDeployment `json:"rollingUpdate,omitempty"` +} + +/// [MachineDeploymentStrategy] + +/// [MachineRollingUpdateDeployment] +// Spec to control the desired behavior of rolling update. +type MachineRollingUpdateDeployment struct { + // The maximum number of machines that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired + // machines (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 0. + // Example: when this is set to 30%, the old MachineSet can be scaled + // down to 70% of desired machines immediately when the rolling update + // starts. Once new machines are ready, old MachineSet can be scaled + // down further, followed by scaling up the new MachineSet, ensuring + // that the total number of machines available at all times + // during the update is at least 70% of desired machines. 
+ // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of machines that can be scheduled above the + // desired number of machines. + // Value can be an absolute number (ex: 5) or a percentage of + // desired machines (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 1. + // Example: when this is set to 30%, the new MachineSet can be scaled + // up immediately when the rolling update starts, such that the total + // number of old and new machines do not exceed 130% of desired + // machines. Once old machines have been killed, new MachineSet can + // be scaled up further, ensuring that total number of machines running + // at any time during the update is at most 130% of desired machines. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +/// [MachineRollingUpdateDeployment] + +/// [MachineDeploymentStatus] +// MachineDeploymentStatus defines the observed state of MachineDeployment +type MachineDeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated machines targeted by this deployment + // (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated machines targeted by this deployment + // that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready machines targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available machines (ready for at least minReadySeconds) + // targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable machines targeted by this deployment. + // This is the total number of machines that are still required for + // the deployment to have 100% available capacity. They may either + // be machines that are running but not yet available or machines + // that still have not been created. 
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"`
+}
+
+/// [MachineDeploymentStatus]
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=machinedeployments,shortName=md,scope=Namespaced
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector
+
+/// [MachineDeployment]
+// MachineDeployment is the Schema for the machinedeployments API
+type MachineDeployment struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   MachineDeploymentSpec   `json:"spec,omitempty"`
+	Status MachineDeploymentStatus `json:"status,omitempty"`
+}
+
+/// [MachineDeployment]
+
+// +kubebuilder:object:root=true
+
+// MachineDeploymentList contains a list of MachineDeployment
+type MachineDeploymentList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []MachineDeployment `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&MachineDeployment{}, &MachineDeploymentList{})
+}
diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machineset_types.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machineset_types.go
new file mode 100644
index 0000000000..6492c5c1b5
--- /dev/null
+++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/machineset_types.go
@@ -0,0 +1,223 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha2
+
+import (
+	"log"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	capierrors "sigs.k8s.io/cluster-api/pkg/errors"
+)
+
+/// [MachineSetSpec]
+// MachineSetSpec defines the desired state of MachineSet
+type MachineSetSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	// MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready.
+	// Defaults to 0 (machine will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
+
+	// DeletePolicy defines the policy used to identify nodes to delete when downscaling.
+	// Defaults to "Random". Valid values are "Random", "Newest", "Oldest"
+	// +kubebuilder:validation:Enum=Random;Newest;Oldest
+	DeletePolicy string `json:"deletePolicy,omitempty"`
+
+	// Selector is a label query over machines that should match the replica count.
+	// Label keys and values that must match in order to be controlled by this MachineSet.
+	// It must match the machine template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector metav1.LabelSelector `json:"selector"`
+
+	// Template is the object that describes the machine that will be created if
+	// insufficient replicas are detected.
+	// Object references to custom resources are treated as templates.
+	// +optional
+	Template MachineTemplateSpec `json:"template,omitempty"`
+}
+
+/// [MachineSetSpec]
+
+/// [MachineTemplateSpec]
+// MachineTemplateSpec describes the data needed to create a Machine from a template
+type MachineTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+	// +optional
+	ObjectMeta `json:"metadata,omitempty"`
+
+	// Specification of the desired behavior of the machine.
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
+	// +optional
+	Spec MachineSpec `json:"spec,omitempty"`
+}
+
+/// [MachineTemplateSpec]
+
+// MachineSetDeletePolicy defines how priority is assigned to nodes to delete when
+// downscaling a MachineSet. Defaults to "Random".
+type MachineSetDeletePolicy string
+
+const (
+	// RandomMachineSetDeletePolicy prioritizes both Machines that have the annotation
+	// "cluster.x-k8s.io/delete-machine=yes" and Machines that are unhealthy
+	// (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+	// Finally, it picks Machines at random to delete.
+	RandomMachineSetDeletePolicy MachineSetDeletePolicy = "Random"
+
+	// NewestMachineSetDeletePolicy prioritizes both Machines that have the annotation
+	// "cluster.x-k8s.io/delete-machine=yes" and Machines that are unhealthy
+	// (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+	// It then prioritizes the newest Machines for deletion based on the Machine's CreationTimestamp.
+	NewestMachineSetDeletePolicy MachineSetDeletePolicy = "Newest"
+
+	// OldestMachineSetDeletePolicy prioritizes both Machines that have the annotation
+	// "cluster.x-k8s.io/delete-machine=yes" and Machines that are unhealthy
+	// (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+	// It then prioritizes the oldest Machines for deletion based on the Machine's CreationTimestamp.
+	OldestMachineSetDeletePolicy MachineSetDeletePolicy = "Oldest"
+)
+
+/// [MachineSetStatus]
+// MachineSetStatus defines the observed state of MachineSet
+type MachineSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	Replicas int32 `json:"replicas"`
+
+	// The number of replicas that have labels matching the labels of the machine template of the MachineSet.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
+
+	// The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready".
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this MachineSet.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty"`
+
+	// ObservedGeneration reflects the generation of the most recently observed MachineSet.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// In the event that there is a terminal problem reconciling the
+	// replicas, both ErrorReason and ErrorMessage will be set. ErrorReason
+	// will be populated with a succinct value suitable for machine
+	// interpretation, while ErrorMessage will contain a more verbose
+	// string suitable for logging and human consumption.
+	//
+	// These fields should not be set for transient errors that a
+	// controller faces that are expected to be fixed automatically over
+	// time (like service outages), but instead indicate that something is
+	// fundamentally wrong with the MachineTemplate's spec or the configuration of
+	// the machine controller, and that manual intervention is required. Examples
+	// of terminal errors would be invalid combinations of settings in the
+	// spec, values that are unsupported by the machine controller, or the
+	// responsible machine controller itself being critically misconfigured.
+	//
+	// Any transient errors that occur during the reconciliation of Machines
+	// can be added as events to the MachineSet object and/or logged in the
+	// controller's output.
+	// +optional
+	ErrorReason *capierrors.MachineSetStatusError `json:"errorReason,omitempty"`
+	// +optional
+	ErrorMessage *string `json:"errorMessage,omitempty"`
+}
+
+// Validate validates the MachineSet fields.
+func (m *MachineSet) Validate() field.ErrorList {
+	errors := field.ErrorList{}
+
+	// validate spec.selector and spec.template.labels
+	fldPath := field.NewPath("spec")
+	errors = append(errors, metav1validation.ValidateLabelSelector(&m.Spec.Selector, fldPath.Child("selector"))...)
+	if len(m.Spec.Selector.MatchLabels)+len(m.Spec.Selector.MatchExpressions) == 0 {
+		errors = append(errors, field.Invalid(fldPath.Child("selector"), m.Spec.Selector, "empty selector is not valid for MachineSet."))
+	}
+	selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector)
+	if err != nil {
+		errors = append(errors, field.Invalid(fldPath.Child("selector"), m.Spec.Selector, "invalid label selector."))
+	} else {
+		labels := labels.Set(m.Spec.Template.Labels)
+		if !selector.Matches(labels) {
+			errors = append(errors, field.Invalid(fldPath.Child("template", "metadata", "labels"), m.Spec.Template.Labels, "`selector` does not match template `labels`"))
+		}
+	}
+
+	return errors
+}
+
+// DefaultingFunction sets default MachineSet field values.
+func (m *MachineSet) Default() { + log.Printf("Defaulting fields for MachineSet %s\n", m.Name) + + if m.Spec.Replicas == nil { + m.Spec.Replicas = new(int32) + *m.Spec.Replicas = 1 + } + + if len(m.Namespace) == 0 { + m.Namespace = metav1.NamespaceDefault + } + + if m.Spec.DeletePolicy == "" { + randomPolicy := string(RandomMachineSetDeletePolicy) + log.Printf("Defaulting to %s\n", randomPolicy) + m.Spec.DeletePolicy = randomPolicy + } +} + +/// [MachineSetStatus] + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=machinesets,shortName=ms,scope=Namespaced +// +kubebuilder:storageversion +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector + +/// [MachineSet] +// MachineSet is the Schema for the machinesets API +type MachineSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSetSpec `json:"spec,omitempty"` + Status MachineSetStatus `json:"status,omitempty"` +} + +/// [MachineSet] + +// +kubebuilder:object:root=true + +// MachineSetList contains a list of MachineSet +type MachineSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MachineSet{}, &MachineSetList{}) +} diff --git a/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a3bf7b4aa3 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/api/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,705 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// autogenerated by controller-gen object, do not modify manually + +package v1alpha2 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/cluster-api/pkg/errors" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. +func (in *APIEndpoint) DeepCopy() *APIEndpoint { + if in == nil { + return nil + } + out := new(APIEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Bootstrap) DeepCopyInto(out *Bootstrap) { + *out = *in + if in.ConfigRef != nil { + in, out := &in.ConfigRef, &out.ConfigRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bootstrap. +func (in *Bootstrap) DeepCopy() *Bootstrap { + if in == nil { + return nil + } + out := new(Bootstrap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkingConfig) DeepCopyInto(out *ClusterNetworkingConfig) { + *out = *in + in.Services.DeepCopyInto(&out.Services) + in.Pods.DeepCopyInto(&out.Pods) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkingConfig. +func (in *ClusterNetworkingConfig) DeepCopy() *ClusterNetworkingConfig { + if in == nil { + return nil + } + out := new(ClusterNetworkingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = new(ClusterNetworkingConfig) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureRef != nil { + in, out := &in.InfrastructureRef, &out.InfrastructureRef + *out = new(v1.ObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. 
+func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make([]APIEndpoint, len(*in)) + copy(*out, *in) + } + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(errors.ClusterStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. +func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Machine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineAddress) DeepCopyInto(out *MachineAddress) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineAddress. +func (in *MachineAddress) DeepCopy() *MachineAddress { + if in == nil { + return nil + } + out := new(MachineAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in MachineAddresses) DeepCopyInto(out *MachineAddresses) { + { + in := &in + *out = make(MachineAddresses, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineAddresses. +func (in MachineAddresses) DeepCopy() MachineAddresses { + if in == nil { + return nil + } + out := new(MachineAddresses) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. +func (in *MachineDeployment) DeepCopy() *MachineDeployment { + if in == nil { + return nil + } + out := new(MachineDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MachineDeployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineDeployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. +func (in *MachineDeploymentList) DeepCopy() *MachineDeploymentList { + if in == nil { + return nil + } + out := new(MachineDeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineDeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(MachineDeploymentStrategy) + (*in).DeepCopyInto(*out) + } + if in.MinReadySeconds != nil { + in, out := &in.MinReadySeconds, &out.MinReadySeconds + *out = new(int32) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. +func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { + if in == nil { + return nil + } + out := new(MachineDeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. +func (in *MachineDeploymentStatus) DeepCopy() *MachineDeploymentStatus { + if in == nil { + return nil + } + out := new(MachineDeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentStrategy) DeepCopyInto(out *MachineDeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(MachineRollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStrategy. 
+func (in *MachineDeploymentStrategy) DeepCopy() *MachineDeploymentStrategy { + if in == nil { + return nil + } + out := new(MachineDeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineList) DeepCopyInto(out *MachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Machine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. +func (in *MachineList) DeepCopy() *MachineList { + if in == nil { + return nil + } + out := new(MachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineRollingUpdateDeployment. +func (in *MachineRollingUpdateDeployment) DeepCopy() *MachineRollingUpdateDeployment { + if in == nil { + return nil + } + out := new(MachineRollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSet) DeepCopyInto(out *MachineSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. +func (in *MachineSet) DeepCopy() *MachineSet { + if in == nil { + return nil + } + out := new(MachineSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. +func (in *MachineSetList) DeepCopy() *MachineSetList { + if in == nil { + return nil + } + out := new(MachineSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MachineSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. +func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { + if in == nil { + return nil + } + out := new(MachineSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { + *out = *in + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(errors.MachineSetStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. +func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { + if in == nil { + return nil + } + out := new(MachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Bootstrap.DeepCopyInto(&out.Bootstrap) + out.InfrastructureRef = in.InfrastructureRef + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. +func (in *MachineSpec) DeepCopy() *MachineSpec { + if in == nil { + return nil + } + out := new(MachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { + *out = *in + if in.NodeRef != nil { + in, out := &in.NodeRef, &out.NodeRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(errors.MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make(MachineAddresses, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. 
+func (in *MachineStatus) DeepCopy() *MachineStatus { + if in == nil { + return nil + } + out := new(MachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. +func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { + if in == nil { + return nil + } + out := new(MachineTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkRanges) DeepCopyInto(out *NetworkRanges) { + *out = *in + if in.CIDRBlocks != nil { + in, out := &in.CIDRBlocks, &out.CIDRBlocks + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRanges. +func (in *NetworkRanges) DeepCopy() *NetworkRanges { + if in == nil { + return nil + } + out := new(NetworkRanges) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]metav1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/cluster-api/build/run_in_workspace_with_goroot.bzl b/vendor/sigs.k8s.io/cluster-api/build/run_in_workspace_with_goroot.bzl index a4cbebd804..81c4bebf92 100644 --- a/vendor/sigs.k8s.io/cluster-api/build/run_in_workspace_with_goroot.bzl +++ b/vendor/sigs.k8s.io/cluster-api/build/run_in_workspace_with_goroot.bzl @@ -67,15 +67,6 @@ _workspace_binary_script = rule( ) # Wraps a binary to be run in the workspace root via bazel run. -# -# For example, one might do something like -# -# workspace_binary( -# name = "dep", -# cmd = "//vendor/github.com/golang/dep/cmd/dep", -# ) -# -# which would allow running dep with bazel run. 
def workspace_binary( name, cmd, diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/BUILD.bazel index 5344a11d3a..d08843b9d9 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/BUILD.bazel @@ -5,7 +5,11 @@ go_library( srcs = ["main.go"], importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl", visibility = ["//visibility:private"], - deps = ["//cmd/clusterctl/cmd:go_default_library"], + deps = [ + "//api/v1alpha2:go_default_library", + "//cmd/clusterctl/cmd:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + ], ) go_binary( diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/README.md b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/README.md index 222af00d02..aa2b7e689a 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/README.md +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/README.md @@ -1,4 +1,20 @@ # clusterctl + + + + +- [Getting Started](#getting-started) + - [Prerequisites](#prerequisites) + - [Limitations](#limitations) + - [Creating a cluster](#creating-a-cluster) + - [Interacting with your cluster](#interacting-with-your-cluster) + - [Scaling your cluster](#scaling-your-cluster) + - [Upgrading your cluster](#upgrading-your-cluster) + - [Node repair](#node-repair) + - [Deleting a cluster](#deleting-a-cluster) +- [Contributing](#contributing) + + `clusterctl` is the SIG-cluster-lifecycle sponsored tool that implements the Cluster API. diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/BUILD.bazel index 45af598ffe..7f190adf56 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/BUILD.bazel @@ -6,10 +6,10 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd", visibility = ["//visibility:public"], deps = [ - "//pkg/client/clientset_generated/clientset:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/configutil.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/configutil.go index 9af03d0149..394d3de513 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/configutil.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd/configutil.go @@ -23,7 +23,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/controller-runtime/pkg/client" ) // This is a convenience method to prevent the need of importing both this version of clientcmd and the client-go version @@ -31,9 +31,11 @@ func NewConfigOverrides() clientcmd.ConfigOverrides { return clientcmd.ConfigOverrides{} } -// NewCoreClientSetForDefaultSearchPath creates a core kubernetes clientset. If the kubeconfigPath is specified then the configuration is loaded from that path. -// Otherwise the default kubeconfig search path is used. 
-// The overrides parameter is used to select a specific context of the config, for example, select the context with a given cluster name or namespace. +// NewCoreClientSetForDefaultSearchPath creates a core kubernetes clientset. +// If the kubeconfigPath is specified then the configuration is loaded from that path, +// otherwise the default kubeconfig search path is used. +// The overrides parameter is used to select a specific context of the config, for example, +// select the context with a given cluster name or namespace. func NewCoreClientSetForDefaultSearchPath(kubeconfigPath string, overrides clientcmd.ConfigOverrides) (*kubernetes.Clientset, error) { config, err := newRestConfigForDefaultSearchPath(kubeconfigPath, overrides) if err != nil { @@ -42,19 +44,23 @@ func NewCoreClientSetForDefaultSearchPath(kubeconfigPath string, overrides clien return kubernetes.NewForConfig(config) } -// NewClusterAPIClientForDefaultSearchPath creates a Cluster API clientset. If the kubeconfigPath is specified then the configuration is loaded from that path. -// Otherwise the default kubeconfig search path is used. -// The overrides parameter is used to select a specific context of the config, for example, select the context with a given cluster name or namespace. -func NewClusterAPIClientForDefaultSearchPath(kubeconfigPath string, overrides clientcmd.ConfigOverrides) (*clientset.Clientset, error) { +// NewControllerRuntimeClient creates a controller-runtime Client. +// If the kubeconfigPath is specified then the configuration is loaded from that path, +// otherwise the default kubeconfig search path is used. +// The overrides parameter is used to select a specific context of the config, for example, +// select the context with a given cluster name or namespace. +func NewControllerRuntimeClient(kubeconfigPath string, overrides clientcmd.ConfigOverrides) (client.Client, error) { config, err := newRestConfigForDefaultSearchPath(kubeconfigPath, overrides) if err != nil { return nil, err } - return clientset.NewForConfig(config) + + return client.New(config, client.Options{}) } // newRestConfig creates a rest.Config for the given apiConfig -// The overrides parameter is used to select a specific context of the config, for example, select the context with a given cluster name or namespace. +// The overrides parameter is used to select a specific context of the config, for example, +// select the context with a given cluster name or namespace. 
func newRestConfig(apiConfig *api.Config, overrides clientcmd.ConfigOverrides) (*rest.Config, error) { return clientcmd.NewDefaultClientConfig(*apiConfig, &overrides).ClientConfig() } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/BUILD.bazel index 0fdbfaafc6..d8fcf5a964 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/BUILD.bazel @@ -9,12 +9,12 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer", visibility = ["//visibility:public"], deps = [ + "//api/v1alpha2:go_default_library", "//cmd/clusterctl/clusterdeployer/bootstrap:go_default_library", "//cmd/clusterctl/clusterdeployer/clusterclient:go_default_library", "//cmd/clusterctl/clusterdeployer/provider:go_default_library", "//cmd/clusterctl/phases:go_default_library", "//cmd/clusterctl/providercomponents:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/klog:go_default_library", @@ -26,9 +26,9 @@ go_test( srcs = ["clusterdeployer_test.go"], embed = [":go_default_library"], deps = [ + "//api/v1alpha2:go_default_library", "//cmd/clusterctl/clusterdeployer/clusterclient:go_default_library", "//cmd/clusterctl/clusterdeployer/provider:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel index d38c05ba34..b85d865ac8 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind", visibility = ["//visibility:public"], deps = [ + "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go index dcf1538a18..f5f3eeab86 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/kind/kind.go @@ -24,10 +24,11 @@ import ( "github.com/pkg/errors" "k8s.io/klog" + "sigs.k8s.io/cluster-api/pkg/util" ) const ( - kindClusterName = "clusterapi" + kindClusterNamePrefix = "clusterapi-" ) var ( @@ -55,7 +56,7 @@ func WithOptions(options []string) *Kind { } return true }() { - options = append(options, fmt.Sprintf("name=%s", kindClusterName)) + options = append(options, fmt.Sprintf("name=%s", kindClusterNamePrefix+util.RandomString(5))) } return &Kind{ diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/BUILD.bazel index 81a56cbfb9..dcc5bb0905 100644 --- 
a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/BUILD.bazel @@ -6,6 +6,7 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube", visibility = ["//visibility:public"], deps = [ + "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube.go index bb9044a10d..1a6a9f90bd 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap/minikube/minikube.go @@ -25,10 +25,11 @@ import ( "github.com/pkg/errors" "k8s.io/klog" + "sigs.k8s.io/cluster-api/pkg/util" ) const ( - minikubeClusterName = "clusterapi" + minikubeClusterNamePrefix = "clusterapi-" ) type Minikube struct { @@ -61,7 +62,7 @@ func WithOptionsAndKubeConfigPath(options []string, kubeconfigpath string) *Mini } return true }() { - options = append(options, fmt.Sprintf("profile=%s", minikubeClusterName)) + options = append(options, fmt.Sprintf("profile=%s", minikubeClusterNamePrefix+util.RandomString(5))) } return &Minikube{ @@ -94,7 +95,13 @@ func (m *Minikube) Create() error { } func (m *Minikube) Delete() error { - _, err := m.exec("delete") + args := []string{"delete"} + for _, opt := range m.options { + if strings.HasPrefix(opt, "profile=") { + args = append(args, fmt.Sprintf("--%v", opt)) + } + } + _, err := m.exec(args...) os.Remove(m.kubeconfigpath) return err } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/BUILD.bazel index 69dad45976..6e357962bf 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/BUILD.bazel @@ -9,9 +9,8 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient", visibility = ["//visibility:public"], deps = [ + "//api/v1alpha2:go_default_library", "//cmd/clusterctl/clientcmd:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", @@ -22,6 +21,7 @@ go_library( "//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go index 810b04f89b..6ff3d1ee9a 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient/clusterclient.go @@ -17,10 +17,10 @@ limitations under the License. 
package clusterclient import ( + "context" "fmt" "io" "io/ioutil" - "net" "os" "os/exec" "reflect" @@ -37,42 +37,41 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth" // nolint tcmd "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/cmd/clusterctl/clientcmd" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" "sigs.k8s.io/cluster-api/pkg/util" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - defaultAPIServerPort = "443" - retryIntervalKubectlApply = 10 * time.Second - retryIntervalResourceReady = 10 * time.Second - retryIntervalResourceDelete = 10 * time.Second - timeoutKubectlApply = 15 * time.Minute - timeoutResourceReady = 15 * time.Minute - timeoutMachineReady = 30 * time.Minute - timeoutResourceDelete = 15 * time.Minute - machineClusterLabelName = "cluster.k8s.io/cluster-name" + retryIntervalKubectlApply = 10 * time.Second + retryIntervalResourceReady = 10 * time.Second + timeoutKubectlApply = 15 * time.Minute + timeoutResourceReady = 15 * time.Minute + timeoutMachineReady = 30 * time.Minute + machineClusterLabelName = "cluster.k8s.io/cluster-name" ) const ( TimeoutMachineReady = "CLUSTER_API_MACHINE_READY_TIMEOUT" ) +var ( + ctx = context.Background() + deletePropagationPolicy = ctrlclient.PropagationPolicy(metav1.DeletePropagationForeground) +) + // Provides interaction with a cluster type Client interface { Apply(string) error Close() error CreateClusterObject(*clusterv1.Cluster) error - CreateMachineClass(*clusterv1.MachineClass) error CreateMachineDeployments([]*clusterv1.MachineDeployment, string) error CreateMachineSets([]*clusterv1.MachineSet, string) error CreateMachines([]*clusterv1.Machine, string) error Delete(string) error DeleteClusters(string) error DeleteNamespace(string) error - DeleteMachineClasses(string) error - DeleteMachineClass(namespace, name string) error DeleteMachineDeployments(string) error DeleteMachineSets(string) error DeleteMachines(string) error @@ -81,10 +80,10 @@ type Client interface { ForceDeleteMachineSet(namespace, name string) error ForceDeleteMachineDeployment(namespace, name string) error EnsureNamespace(string) error + GetKubeconfigFromSecret(namespace, clusterName string) (string, error) GetClusters(string) ([]*clusterv1.Cluster, error) GetCluster(string, string) (*clusterv1.Cluster, error) GetContextNamespace() string - GetMachineClasses(namespace string) ([]*clusterv1.MachineClass, error) GetMachineDeployment(namespace, name string) (*clusterv1.MachineDeployment, error) GetMachineDeploymentsForCluster(*clusterv1.Cluster) ([]*clusterv1.MachineDeployment, error) GetMachineDeployments(string) ([]*clusterv1.MachineDeployment, error) @@ -96,13 +95,12 @@ type Client interface { GetMachinesForCluster(*clusterv1.Cluster) ([]*clusterv1.Machine, error) GetMachinesForMachineSet(*clusterv1.MachineSet) ([]*clusterv1.Machine, error) ScaleStatefulSet(namespace, name string, scale int32) error - WaitForClusterV1alpha1Ready() error - UpdateClusterObjectEndpoint(string, string, string) error + WaitForClusterV1alpha2Ready() error WaitForResourceStatuses() error } type client struct { - clientSet clientset.Interface + clientSet ctrlclient.Client kubeconfigFile string configOverrides tcmd.ConfigOverrides closeFn func() error @@ -134,6 +132,25 @@ func (c *client) EnsureNamespace(namespaceName string) error { return errors.Wrap(err, "error creating core clientset") } + _, err = 
clientset.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{}) + if err == nil { + return nil + } + if apierrors.IsForbidden(err) { + namespaces, err := clientset.CoreV1().Namespaces().List(metav1.ListOptions{}) + if err != nil { + return err + } + + for _, ns := range namespaces.Items { + if ns.Name == namespaceName { + return nil + } + } + } + if !apierrors.IsNotFound(err) { + return err + } namespace := apiv1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespaceName, @@ -146,6 +163,20 @@ func (c *client) EnsureNamespace(namespaceName string) error { return nil } +func (c *client) GetKubeconfigFromSecret(namespace, clusterName string) (string, error) { + clientset, err := clientcmd.NewCoreClientSetForDefaultSearchPath(c.kubeconfigFile, clientcmd.NewConfigOverrides()) + if err != nil { + return "", errors.Wrap(err, "error creating core clientset") + } + + secretName := fmt.Sprintf("%s-kubeconfig", clusterName) + secret, err := clientset.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}) + if err != nil { + return "", err + } + return string(secret.Data["value"]), nil +} + func (c *client) ScaleStatefulSet(ns string, name string, scale int32) error { clientset, err := clientcmd.NewCoreClientSetForDefaultSearchPath(c.kubeconfigFile, clientcmd.NewConfigOverrides()) if err != nil { @@ -186,7 +217,7 @@ func (c *client) DeleteNamespace(namespaceName string) error { // NewFromDefaultSearchPath creates and returns a Client. The kubeconfigFile argument is expected to be the path to a // valid kubeconfig file. func NewFromDefaultSearchPath(kubeconfigFile string, overrides tcmd.ConfigOverrides) (*client, error) { //nolint - c, err := clientcmd.NewClusterAPIClientForDefaultSearchPath(kubeconfigFile, overrides) + c, err := clientcmd.NewControllerRuntimeClient(kubeconfigFile, overrides) if err != nil { return nil, err } @@ -238,18 +269,17 @@ func (c *client) GetCluster(name, ns string) (*clusterv1.Cluster, error) { // ForceDeleteCluster removes the finalizer for a Cluster prior to deleting, this is used during pivot func (c *client) ForceDeleteCluster(namespace, name string) error { - cluster, err := c.clientSet.ClusterV1alpha1().Clusters(namespace).Get(name, metav1.GetOptions{}) - if err != nil { + cluster := &clusterv1.Cluster{} + if err := c.clientSet.Get(ctx, ctrlclient.ObjectKey{Name: name, Namespace: namespace}, cluster); err != nil { return errors.Wrapf(err, "error getting cluster %s/%s", namespace, name) } cluster.ObjectMeta.SetFinalizers([]string{}) - - if _, err := c.clientSet.ClusterV1alpha1().Clusters(namespace).Update(cluster); err != nil { + if err := c.clientSet.Update(ctx, cluster); err != nil { return errors.Wrapf(err, "error removing finalizer on cluster %s/%s", namespace, name) } - if err := c.clientSet.ClusterV1alpha1().Clusters(namespace).Delete(name, &metav1.DeleteOptions{}); err != nil { + if err := c.clientSet.Delete(ctx, cluster); err != nil { return errors.Wrapf(err, "error deleting cluster %s/%s", namespace, name) } @@ -257,45 +287,38 @@ func (c *client) ForceDeleteCluster(namespace, name string) error { } func (c *client) GetClusters(namespace string) ([]*clusterv1.Cluster, error) { - clusters := []*clusterv1.Cluster{} - clusterlist, err := c.clientSet.ClusterV1alpha1().Clusters(namespace).List(metav1.ListOptions{}) - if err != nil { + clusters := &clusterv1.ClusterList{} + if err := c.clientSet.List(ctx, clusters); err != nil { return nil, errors.Wrapf(err, "error listing cluster objects in namespace %q", namespace) } - - for i := 0; i < 
len(clusterlist.Items); i++ { - clusters = append(clusters, &clusterlist.Items[i]) - } - return clusters, nil -} - -func (c *client) GetMachineClasses(namespace string) ([]*clusterv1.MachineClass, error) { - machineClassesList, err := c.clientSet.ClusterV1alpha1().MachineClasses(namespace).List(metav1.ListOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "error listing MachineClasses in namespace %q", namespace) - } - var machineClasses []*clusterv1.MachineClass - for i := 0; i < len(machineClassesList.Items); i++ { - machineClasses = append(machineClasses, &machineClassesList.Items[i]) + items := []*clusterv1.Cluster{} + for i := 0; i < len(clusters.Items); i++ { + items = append(items, &clusters.Items[i]) } - return machineClasses, nil + return items, nil } func (c *client) GetMachineDeployment(namespace, name string) (*clusterv1.MachineDeployment, error) { - machineDeployment, err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "error getting MachineDeployment: %s/%s", namespace, name) + obj := &clusterv1.MachineDeployment{} + if err := c.clientSet.Get(ctx, ctrlclient.ObjectKey{Name: name, Namespace: namespace}, obj); err != nil { + return nil, errors.Wrapf(err, "error getting MachineDeployment %s/%s", namespace, name) } - return machineDeployment, nil + return obj, nil } func (c *client) GetMachineDeploymentsForCluster(cluster *clusterv1.Cluster) ([]*clusterv1.MachineDeployment, error) { - machineDeploymentList, err := c.clientSet.ClusterV1alpha1().MachineDeployments(cluster.Namespace).List(metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", machineClusterLabelName, cluster.Name), - }) - if err != nil { + selectors := []ctrlclient.ListOption{ + ctrlclient.MatchingLabels{ + machineClusterLabelName: cluster.Name, + }, + ctrlclient.InNamespace(cluster.Namespace), + } + machineDeploymentList := &clusterv1.MachineDeploymentList{} + if err := c.clientSet.List(ctx, machineDeploymentList, selectors...); err != nil { return nil, errors.Wrapf(err, "error listing MachineDeployments for Cluster %s/%s", cluster.Namespace, cluster.Name) + } + var machineDeployments []*clusterv1.MachineDeployment for _, md := range machineDeploymentList.Items { for _, or := range md.GetOwnerReferences() { @@ -309,8 +332,8 @@ func (c *client) GetMachineDeploymentsForCluster(cluster *clusterv1.Cluster) ([] } func (c *client) GetMachineDeployments(namespace string) ([]*clusterv1.MachineDeployment, error) { - machineDeploymentList, err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).List(metav1.ListOptions{}) - if err != nil { + machineDeploymentList := &clusterv1.MachineDeploymentList{} + if err := c.clientSet.List(ctx, machineDeploymentList, ctrlclient.InNamespace(namespace)); err != nil { return nil, errors.Wrapf(err, "error listing machine deployment objects in namespace %q", namespace) } var machineDeployments []*clusterv1.MachineDeployment @@ -321,16 +344,16 @@ func (c *client) GetMachineDeployments(namespace string) ([]*clusterv1.MachineDe } func (c *client) GetMachineSet(namespace, name string) (*clusterv1.MachineSet, error) { - machineSet, err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { + obj := &clusterv1.MachineSet{} + if err := c.clientSet.Get(ctx, ctrlclient.ObjectKey{Name: name, Namespace: namespace}, obj); err != nil { return nil, errors.Wrapf(err, "error getting MachineSet: %s/%s", namespace, name) } - return 
machineSet, nil + return obj, nil } func (c *client) GetMachineSets(namespace string) ([]*clusterv1.MachineSet, error) { - machineSetList, err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).List(metav1.ListOptions{}) - if err != nil { + machineSetList := &clusterv1.MachineSetList{} + if err := c.clientSet.List(ctx, machineSetList, ctrlclient.InNamespace(namespace)); err != nil { return nil, errors.Wrapf(err, "error listing MachineSets in namespace %q", namespace) } var machineSets []*clusterv1.MachineSet @@ -341,10 +364,14 @@ func (c *client) GetMachineSets(namespace string) ([]*clusterv1.MachineSet, erro } func (c *client) GetMachineSetsForCluster(cluster *clusterv1.Cluster) ([]*clusterv1.MachineSet, error) { - machineSetList, err := c.clientSet.ClusterV1alpha1().MachineSets(cluster.Namespace).List(metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", machineClusterLabelName, cluster.Name), - }) - if err != nil { + selectors := []ctrlclient.ListOption{ + ctrlclient.MatchingLabels{ + machineClusterLabelName: cluster.Name, + }, + ctrlclient.InNamespace(cluster.Namespace), + } + machineSetList := &clusterv1.MachineSetList{} + if err := c.clientSet.List(ctx, machineSetList, selectors...); err != nil { return nil, errors.Wrapf(err, "error listing MachineSets for Cluster %s/%s", cluster.Namespace, cluster.Name) } var machineSets []*clusterv1.MachineSet @@ -373,28 +400,29 @@ func (c *client) GetMachineSetsForMachineDeployment(md *clusterv1.MachineDeploym return controlledMachineSets, nil } -func (c *client) GetMachines(namespace string) ([]*clusterv1.Machine, error) { - machines := []*clusterv1.Machine{} - machineslist, err := c.clientSet.ClusterV1alpha1().Machines(namespace).List(metav1.ListOptions{}) - if err != nil { +func (c *client) GetMachines(namespace string) (machines []*clusterv1.Machine, _ error) { + machineslist := &clusterv1.MachineList{} + if err := c.clientSet.List(ctx, machineslist, ctrlclient.InNamespace(namespace)); err != nil { return nil, errors.Wrapf(err, "error listing Machines in namespace %q", namespace) } - for i := 0; i < len(machineslist.Items); i++ { machines = append(machines, &machineslist.Items[i]) } - return machines, nil + return } func (c *client) GetMachinesForCluster(cluster *clusterv1.Cluster) ([]*clusterv1.Machine, error) { - machineslist, err := c.clientSet.ClusterV1alpha1().Machines(cluster.Namespace).List(metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s", machineClusterLabelName, cluster.Name), - }) - if err != nil { + selectors := []ctrlclient.ListOption{ + ctrlclient.MatchingLabels{ + machineClusterLabelName: cluster.Name, + }, + ctrlclient.InNamespace(cluster.Namespace), + } + machineslist := &clusterv1.MachineList{} + if err := c.clientSet.List(ctx, machineslist, selectors...); err != nil { return nil, errors.Wrapf(err, "error listing Machines for Cluster %s/%s", cluster.Namespace, cluster.Name) } var machines []*clusterv1.Machine - for i := 0; i < len(machineslist.Items); i++ { m := &machineslist.Items[i] @@ -423,29 +451,12 @@ func (c *client) GetMachinesForMachineSet(ms *clusterv1.MachineSet) ([]*clusterv return controlledMachines, nil } -func (c *client) CreateMachineClass(machineClass *clusterv1.MachineClass) error { - _, err := c.clientSet.ClusterV1alpha1().MachineClasses(machineClass.Namespace).Create(machineClass) - if err != nil { - return errors.Wrapf(err, "error creating MachineClass %s/%s", machineClass.Namespace, machineClass.Name) - } - return nil -} - -func (c *client) DeleteMachineClass(namespace, name string) error 
{ - if err := c.clientSet.ClusterV1alpha1().MachineClasses(namespace).Delete(name, newDeleteOptions()); err != nil { - return errors.Wrapf(err, "error deleting MachineClass %s/%s", namespace, name) - } - return nil -} - func (c *client) CreateClusterObject(cluster *clusterv1.Cluster) error { namespace := c.GetContextNamespace() if cluster.Namespace != "" { namespace = cluster.Namespace } - - _, err := c.clientSet.ClusterV1alpha1().Clusters(namespace).Create(cluster) - if err != nil { + if err := c.clientSet.Create(ctx, cluster); err != nil { return errors.Wrapf(err, "error creating cluster in namespace %v", namespace) } return nil @@ -454,8 +465,7 @@ func (c *client) CreateClusterObject(cluster *clusterv1.Cluster) error { func (c *client) CreateMachineDeployments(deployments []*clusterv1.MachineDeployment, namespace string) error { for _, deploy := range deployments { // TODO: Run in parallel https://github.com/kubernetes-sigs/cluster-api/issues/258 - _, err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).Create(deploy) - if err != nil { + if err := c.clientSet.Create(ctx, deploy); err != nil { return errors.Wrapf(err, "error creating a machine deployment object in namespace %q", namespace) } } @@ -465,8 +475,7 @@ func (c *client) CreateMachineDeployments(deployments []*clusterv1.MachineDeploy func (c *client) CreateMachineSets(machineSets []*clusterv1.MachineSet, namespace string) error { for _, ms := range machineSets { // TODO: Run in parallel https://github.com/kubernetes-sigs/cluster-api/issues/258 - _, err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).Create(ms) - if err != nil { + if err := c.clientSet.Create(ctx, ms); err != nil { return errors.Wrapf(err, "error creating a machine set object in namespace %q", namespace) } } @@ -486,15 +495,14 @@ func (c *client) CreateMachines(machines []*clusterv1.Machine, namespace string) go func(machine *clusterv1.Machine) { defer wg.Done() - createdMachine, err := c.clientSet.ClusterV1alpha1().Machines(namespace).Create(machine) - if err != nil { + if err := c.clientSet.Create(ctx, machine); err != nil { errOnce.Do(func() { gerr = errors.Wrapf(err, "error creating a machine object in namespace %v", namespace) }) return } - if err := waitForMachineReady(c.clientSet, createdMachine); err != nil { + if err := waitForMachineReady(c.clientSet, machine); err != nil { errOnce.Do(func() { gerr = err }) } }(machine) @@ -505,244 +513,82 @@ func (c *client) CreateMachines(machines []*clusterv1.Machine, namespace string) // DeleteClusters deletes all Clusters in a namespace. If the namespace is empty then all Clusters in all namespaces are deleted. 
func (c *client) DeleteClusters(namespace string) error { - seen := make(map[string]bool) - - if namespace != "" { - seen[namespace] = true - } else { - clusters, err := c.clientSet.ClusterV1alpha1().Clusters("").List(metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error listing Clusters in all namespaces") - } - for _, cluster := range clusters.Items { - if _, ok := seen[cluster.Namespace]; !ok { - seen[cluster.Namespace] = true - } - } - } - for ns := range seen { - err := c.clientSet.ClusterV1alpha1().Clusters(ns).DeleteCollection(newDeleteOptions(), metav1.ListOptions{}) - if err != nil { - return errors.Wrapf(err, "error deleting Clusters in namespace %q", ns) - } - err = c.waitForClusterDelete(ns) - if err != nil { - return errors.Wrapf(err, "error waiting for Cluster(s) deletion to complete in namespace %q", ns) - } + if err := c.clientSet.DeleteAllOf(ctx, &clusterv1.Cluster{}, ctrlclient.InNamespace(namespace)); err != nil { + return errors.Wrapf(err, "error deleting Clusters in namespace %q", namespace) } - - return nil -} - -// DeleteMachineClasses deletes all MachineClasses in a namespace. If the namespace is empty then all MachineClasses in all namespaces are deleted. -func (c *client) DeleteMachineClasses(namespace string) error { - seen := make(map[string]bool) - - if namespace != "" { - seen[namespace] = true - } else { - machineClasses, err := c.clientSet.ClusterV1alpha1().MachineClasses("").List(metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error listing MachineClasses in all namespaces") - } - for _, mc := range machineClasses.Items { - if _, ok := seen[mc.Namespace]; !ok { - seen[mc.Namespace] = true - } - } - } - - for ns := range seen { - if err := c.DeleteMachineClasses(ns); err != nil { - return err - } - err := c.clientSet.ClusterV1alpha1().MachineClasses(ns).DeleteCollection(newDeleteOptions(), metav1.ListOptions{}) - if err != nil { - return errors.Wrapf(err, "error deleting MachineClasses in namespace %q", ns) - } - err = c.waitForMachineClassesDelete(ns) - if err != nil { - return errors.Wrapf(err, "error waiting for MachineClass(es) deletion to complete in ns %q", ns) - } - } - return nil } // DeleteMachineDeployments deletes all MachineDeployments in a namespace. If the namespace is empty then all MachineDeployments in all namespaces are deleted. 
func (c *client) DeleteMachineDeployments(namespace string) error { - seen := make(map[string]bool) - - if namespace != "" { - seen[namespace] = true - } else { - machineDeployments, err := c.clientSet.ClusterV1alpha1().MachineDeployments("").List(metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error listing MachineDeployments in all namespaces") - } - for _, md := range machineDeployments.Items { - if _, ok := seen[md.Namespace]; !ok { - seen[md.Namespace] = true - } - } + if err := c.clientSet.DeleteAllOf(ctx, &clusterv1.MachineDeployment{}, ctrlclient.InNamespace(namespace)); err != nil { + return errors.Wrapf(err, "error deleting MachineDeployments in namespace %q", namespace) } - for ns := range seen { - err := c.clientSet.ClusterV1alpha1().MachineDeployments(ns).DeleteCollection(newDeleteOptions(), metav1.ListOptions{}) - if err != nil { - return errors.Wrapf(err, "error deleting MachineDeployments in namespace %q", ns) - } - err = c.waitForMachineDeploymentsDelete(ns) - if err != nil { - return errors.Wrapf(err, "error waiting for MachineDeployment(s) deletion to complete in namespace %q", ns) - } - } - return nil } // DeleteMachineSets deletes all MachineSets in a namespace. If the namespace is empty then all MachineSets in all namespaces are deleted. func (c *client) DeleteMachineSets(namespace string) error { - seen := make(map[string]bool) - - if namespace != "" { - seen[namespace] = true - } else { - machineSets, err := c.clientSet.ClusterV1alpha1().MachineSets("").List(metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error listing MachineSets in all namespaces") - } - for _, ms := range machineSets.Items { - if _, ok := seen[ms.Namespace]; !ok { - seen[ms.Namespace] = true - } - } - } - for ns := range seen { - err := c.clientSet.ClusterV1alpha1().MachineSets(ns).DeleteCollection(newDeleteOptions(), metav1.ListOptions{}) - if err != nil { - return errors.Wrapf(err, "error deleting MachineSets in namespace %q", ns) - } - err = c.waitForMachineSetsDelete(ns) - if err != nil { - return errors.Wrapf(err, "error waiting for MachineSet(s) deletion to complete in namespace %q", ns) - } + if err := c.clientSet.DeleteAllOf(ctx, &clusterv1.MachineSet{}, ctrlclient.InNamespace(namespace)); err != nil { + return errors.Wrapf(err, "error deleting MachineSets in namespace %q", namespace) } - return nil } // DeleteMachines deletes all Machines in a namespace. If the namespace is empty then all Machines in all namespaces are deleted. 
func (c *client) DeleteMachines(namespace string) error { - seen := make(map[string]bool) - - if namespace != "" { - seen[namespace] = true - } else { - machines, err := c.clientSet.ClusterV1alpha1().Machines("").List(metav1.ListOptions{}) - if err != nil { - return errors.Wrap(err, "error listing Machines in all namespaces") - } - for _, m := range machines.Items { - if _, ok := seen[m.Namespace]; !ok { - seen[m.Namespace] = true - } - } + if err := c.clientSet.DeleteAllOf(ctx, &clusterv1.Machine{}, ctrlclient.InNamespace(namespace)); err != nil { + return errors.Wrapf(err, "error deleting Machines in namespace %q", namespace) } - for ns := range seen { - err := c.clientSet.ClusterV1alpha1().Machines(ns).DeleteCollection(newDeleteOptions(), metav1.ListOptions{}) - if err != nil { - return errors.Wrapf(err, "error deleting Machines in namespace %q", ns) - } - err = c.waitForMachinesDelete(ns) - if err != nil { - return errors.Wrapf(err, "error waiting for Machine(s) deletion to complete in namespace %q", ns) - } - } - return nil } func (c *client) ForceDeleteMachine(namespace, name string) error { - machine, err := c.clientSet.ClusterV1alpha1().Machines(namespace).Get(name, metav1.GetOptions{}) - if err != nil { + machine := &clusterv1.Machine{} + if err := c.clientSet.Get(ctx, ctrlclient.ObjectKey{Name: name, Namespace: namespace}, machine); err != nil { return errors.Wrapf(err, "error getting Machine %s/%s", namespace, name) } machine.SetFinalizers([]string{}) - if _, err := c.clientSet.ClusterV1alpha1().Machines(namespace).Update(machine); err != nil { + if err := c.clientSet.Update(ctx, machine); err != nil { return errors.Wrapf(err, "error removing finalizer for Machine %s/%s", namespace, name) } - if err := c.clientSet.ClusterV1alpha1().Machines(namespace).Delete(name, newDeleteOptions()); err != nil { + if err := c.clientSet.Delete(ctx, machine, deletePropagationPolicy); err != nil { return errors.Wrapf(err, "error deleting Machine %s/%s", namespace, name) } return nil } func (c *client) ForceDeleteMachineSet(namespace, name string) error { - ms, err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { + ms := &clusterv1.MachineSet{} + if err := c.clientSet.Get(ctx, ctrlclient.ObjectKey{Name: name, Namespace: namespace}, ms); err != nil { return errors.Wrapf(err, "error getting MachineSet %s/%s", namespace, name) } ms.SetFinalizers([]string{}) - if _, err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).Update(ms); err != nil { + if err := c.clientSet.Update(ctx, ms); err != nil { return errors.Wrapf(err, "error removing finalizer for MachineSet %s/%s", namespace, name) } - if err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).Delete(name, newDeleteOptions()); err != nil { + if err := c.clientSet.Delete(ctx, ms, deletePropagationPolicy); err != nil { return errors.Wrapf(err, "error deleting MachineSet %s/%s", namespace, name) } return nil } func (c *client) ForceDeleteMachineDeployment(namespace, name string) error { - md, err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).Get(name, metav1.GetOptions{}) - if err != nil { + md := &clusterv1.MachineDeployment{} + if err := c.clientSet.Get(ctx, ctrlclient.ObjectKey{Name: name, Namespace: namespace}, md); err != nil { return errors.Wrapf(err, "error getting MachineDeployment %s/%s", namespace, name) } md.SetFinalizers([]string{}) - if _, err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).Update(md); err != nil { + if err := 
c.clientSet.Update(ctx, md); err != nil { return errors.Wrapf(err, "error removing finalizer for MachineDeployment %s/%s", namespace, name) } - if err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).Delete(name, newDeleteOptions()); err != nil { + if err := c.clientSet.Delete(ctx, md, deletePropagationPolicy); err != nil { return errors.Wrapf(err, "error deleting MachineDeployment %s/%s", namespace, name) } return nil } -func newDeleteOptions() *metav1.DeleteOptions { - propagationPolicy := metav1.DeletePropagationForeground - return &metav1.DeleteOptions{ - PropagationPolicy: &propagationPolicy, - } -} - -// UpdateClusterObjectEndpoint updates the status of a cluster API endpoint, clusterEndpoint -// can be passed as hostname or hostname:port, if port is not present the default port 443 is applied. -// TODO: Test this function -func (c *client) UpdateClusterObjectEndpoint(clusterEndpoint, clusterName, namespace string) error { - cluster, err := c.GetCluster(clusterName, namespace) - if err != nil { - return err - } - endpointHost, endpointPort, err := net.SplitHostPort(clusterEndpoint) - if err != nil { - // We rely on provider.GetControlPlaneEndpoint to provide a correct hostname/IP, no - // further validation is done. - endpointHost = clusterEndpoint - endpointPort = defaultAPIServerPort - } - endpointPortInt, err := strconv.Atoi(endpointPort) - if err != nil { - return errors.Wrapf(err, "error while converting cluster endpoint port %q", endpointPort) - } - cluster.Status.APIEndpoints = append(cluster.Status.APIEndpoints, - clusterv1.APIEndpoint{ - Host: endpointHost, - Port: endpointPortInt, - }) - _, err = c.clientSet.ClusterV1alpha1().Clusters(namespace).UpdateStatus(cluster) - return err -} - -func (c *client) WaitForClusterV1alpha1Ready() error { +func (c *client) WaitForClusterV1alpha2Ready() error { return waitForClusterResourceReady(c.clientSet) } @@ -752,8 +598,9 @@ func (c *client) WaitForResourceStatuses() error { timeout := time.Until(deadline) return util.PollImmediate(retryIntervalResourceReady, timeout, func() (bool, error) { klog.V(2).Info("Waiting for Cluster API resources to have statuses...") - clusters, err := c.clientSet.ClusterV1alpha1().Clusters("").List(metav1.ListOptions{}) - if err != nil { + + clusters := &clusterv1.ClusterList{} + if err := c.clientSet.List(ctx, clusters); err != nil { klog.V(10).Infof("retrying: failed to list clusters: %v", err) return false, nil } @@ -762,13 +609,14 @@ func (c *client) WaitForResourceStatuses() error { klog.V(10).Info("retrying: cluster status is empty") return false, nil } - if cluster.Status.ProviderStatus == nil { - klog.V(10).Info("retrying: cluster.Status.ProviderStatus is not set") + if !cluster.Status.InfrastructureReady { + klog.V(10).Info("retrying: cluster.Status.InfrastructureReady is false") return false, nil } } - machineDeployments, err := c.clientSet.ClusterV1alpha1().MachineDeployments("").List(metav1.ListOptions{}) - if err != nil { + + machineDeployments := &clusterv1.MachineDeploymentList{} + if err := c.clientSet.List(ctx, machineDeployments); err != nil { klog.V(10).Infof("retrying: failed to list machine deployment: %v", err) return false, nil } @@ -778,8 +626,9 @@ func (c *client) WaitForResourceStatuses() error { return false, nil } } - machineSets, err := c.clientSet.ClusterV1alpha1().MachineSets("").List(metav1.ListOptions{}) - if err != nil { + + machineSets := &clusterv1.MachineSetList{} + if err := c.clientSet.List(ctx, machineSets); err != nil { klog.V(10).Infof("retrying: 
failed to list machinesets: %v", err) return false, nil } @@ -789,8 +638,9 @@ func (c *client) WaitForResourceStatuses() error { return false, nil } } - machines, err := c.clientSet.ClusterV1alpha1().Machines("").List(metav1.ListOptions{}) - if err != nil { + + machines := &clusterv1.MachineList{} + if err := c.clientSet.List(ctx, machines); err != nil { klog.V(10).Infof("retrying: failed to list machines: %v", err) return false, nil } @@ -799,100 +649,16 @@ func (c *client) WaitForResourceStatuses() error { klog.V(10).Info("retrying: machine status is empty") return false, nil } - if m.Status.ProviderStatus == nil { - klog.V(10).Info("retrying: machine.Status.ProviderStatus is not set") - return false, nil - } + // if m.Status.ProviderStatus == nil { + // klog.V(10).Info("retrying: machine.Status.ProviderStatus is not set") + // return false, nil + // } } return true, nil }) } -func (c *client) waitForClusterDelete(namespace string) error { - return util.PollImmediate(retryIntervalResourceDelete, timeoutResourceDelete, func() (bool, error) { - klog.V(2).Infof("Waiting for Clusters to be deleted...") - response, err := c.clientSet.ClusterV1alpha1().Clusters(namespace).List(metav1.ListOptions{}) - if err != nil { - return false, nil - } - if len(response.Items) > 0 { - return false, nil - } - return true, nil - }) -} - -func (c *client) waitForMachineClassesDelete(namespace string) error { - return util.PollImmediate(retryIntervalResourceDelete, timeoutResourceDelete, func() (bool, error) { - klog.V(2).Infof("Waiting for MachineClasses to be deleted...") - response, err := c.clientSet.ClusterV1alpha1().MachineClasses(namespace).List(metav1.ListOptions{}) - if err != nil { - return false, nil - } - if len(response.Items) > 0 { - return false, nil - } - return true, nil - }) -} - -func (c *client) waitForMachineDeploymentsDelete(namespace string) error { - return util.PollImmediate(retryIntervalResourceDelete, timeoutResourceDelete, func() (bool, error) { - klog.V(2).Infof("Waiting for MachineDeployments to be deleted...") - response, err := c.clientSet.ClusterV1alpha1().MachineDeployments(namespace).List(metav1.ListOptions{}) - if err != nil { - return false, nil - } - if len(response.Items) > 0 { - return false, nil - } - return true, nil - }) -} - -func (c *client) waitForMachineSetsDelete(namespace string) error { - return util.PollImmediate(retryIntervalResourceDelete, timeoutResourceDelete, func() (bool, error) { - klog.V(2).Infof("Waiting for MachineSets to be deleted...") - response, err := c.clientSet.ClusterV1alpha1().MachineSets(namespace).List(metav1.ListOptions{}) - if err != nil { - return false, nil - } - if len(response.Items) > 0 { - return false, nil - } - return true, nil - }) -} - -func (c *client) waitForMachinesDelete(namespace string) error { - return util.PollImmediate(retryIntervalResourceDelete, timeoutResourceDelete, func() (bool, error) { - klog.V(2).Infof("Waiting for Machines to be deleted...") - response, err := c.clientSet.ClusterV1alpha1().Machines(namespace).List(metav1.ListOptions{}) - if err != nil { - return false, nil - } - if len(response.Items) > 0 { - return false, nil - } - return true, nil - }) -} - -func (c *client) waitForMachineDelete(namespace, name string) error { - return util.PollImmediate(retryIntervalResourceDelete, timeoutResourceDelete, func() (bool, error) { - klog.V(2).Infof("Waiting for Machine %s/%s to be deleted...", namespace, name) - response, err := c.clientSet.ClusterV1alpha1().Machines(namespace).Get(name, metav1.GetOptions{}) - 
if err != nil { - return false, nil - } - if response != nil { - return false, nil - } - return true, nil - }) -} - func (c *client) kubectlDelete(manifest string) error { return c.kubectlManifestCmd("delete", manifest) } @@ -956,32 +722,20 @@ func (c *client) waitForKubectlApply(manifest string) error { return err } -func waitForClusterResourceReady(cs clientset.Interface) error { +func waitForClusterResourceReady(cs ctrlclient.Client) error { deadline := time.Now().Add(timeoutResourceReady) - err := util.PollImmediate(retryIntervalResourceReady, timeoutResourceReady, func() (bool, error) { - klog.V(2).Info("Waiting for Cluster v1alpha resources to become available...") - _, err := cs.Discovery().ServerResourcesForGroupVersion("cluster.k8s.io/v1alpha1") - if err == nil { - return true, nil - } - return false, nil - }) - - if err != nil { - return err - } timeout := time.Until(deadline) return util.PollImmediate(retryIntervalResourceReady, timeout, func() (bool, error) { - klog.V(2).Info("Waiting for Cluster v1alpha resources to be listable...") - _, err := cs.ClusterV1alpha1().Clusters(apiv1.NamespaceDefault).List(metav1.ListOptions{}) - if err == nil { + klog.V(2).Info("Waiting for Cluster resources to be listable...") + clusterList := &clusterv1.ClusterList{} + if err := cs.List(ctx, clusterList); err == nil { return true, nil } return false, nil }) } -func waitForMachineReady(cs clientset.Interface, machine *clusterv1.Machine) error { +func waitForMachineReady(cs ctrlclient.Client, machine *clusterv1.Machine) error { timeout := timeoutMachineReady if p := os.Getenv(TimeoutMachineReady); p != "" { t, err := strconv.Atoi(p) @@ -994,15 +748,14 @@ func waitForMachineReady(cs clientset.Interface, machine *clusterv1.Machine) err err := util.PollImmediate(retryIntervalResourceReady, timeout, func() (bool, error) { klog.V(2).Infof("Waiting for Machine %v to become ready...", machine.Name) - m, err := cs.ClusterV1alpha1().Machines(machine.Namespace).Get(machine.Name, metav1.GetOptions{}) - if err != nil { + if err := cs.Get(ctx, ctrlclient.ObjectKey{Name: machine.Name, Namespace: machine.Namespace}, machine); err != nil { return false, nil } // TODO: update once machine controllers have a way to indicate a machine has been provisoned. https://github.com/kubernetes-sigs/cluster-api/issues/253 // Seeing a node cannot be purely relied upon because the provisioned control plane will not be registering with // the stack that provisions it. 
- ready := m.Status.NodeRef != nil || len(m.Annotations) > 0 + ready := machine.Status.NodeRef != nil || len(machine.Annotations) > 0 return ready, nil }) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go index 499c33c711..6eb1b66559 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go @@ -22,11 +22,11 @@ import ( "github.com/pkg/errors" "k8s.io/klog" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider" "sigs.k8s.io/cluster-api/cmd/clusterctl/phases" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) type ClusterDeployer struct { @@ -56,7 +56,7 @@ func New( } // Create the cluster from the provided cluster definition and machine list. -func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*clusterv1.Machine, provider provider.Deployer, kubeconfigOutput string, providerComponentsStoreFactory provider.ComponentsStoreFactory) error { +func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*clusterv1.Machine, kubeconfigOutput string, providerComponentsStoreFactory provider.ComponentsStoreFactory) error { controlPlaneMachines, nodes, err := clusterclient.ExtractControlPlaneMachines(machines) if err != nil { return errors.Wrap(err, "unable to separate control plane machines from node machines") @@ -94,13 +94,8 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster return errors.Wrap(err, "unable to create control plane machine") } - klog.Infof("Updating bootstrap cluster object for cluster %v in namespace %q with control plane endpoint running on machine", cluster.Name, cluster.Namespace) - if err := d.updateClusterEndpoint(bootstrapClient, provider, cluster.Name, cluster.Namespace); err != nil { - return errors.Wrap(err, "unable to update bootstrap cluster endpoint") - } - klog.Info("Creating target cluster") - targetKubeconfig, err := phases.GetKubeconfig(bootstrapClient, provider, kubeconfigOutput, cluster.Name, cluster.Namespace) + targetKubeconfig, err := phases.GetKubeconfig(bootstrapClient, kubeconfigOutput, cluster.Name, cluster.Namespace) if err != nil { return fmt.Errorf("unable to create target cluster kubeconfig: %v", err) } @@ -128,13 +123,6 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster return errors.Wrap(err, "unable to save provider components to target cluster") } - // For some reason, endpoint doesn't get updated in bootstrap cluster sometimes. So we - // update the target cluster endpoint as well to be sure. - klog.Info("Updating target cluster object with control plane endpoint running on machine") - if err := d.updateClusterEndpoint(targetClient, provider, cluster.Name, cluster.Namespace); err != nil { - return errors.Wrap(err, "unable to update target cluster endpoint") - } - if len(controlPlaneMachines) > 1 { // TODO(h0tbird) Done serially until kubernetes/kubeadm#1097 is resolved and all // supported versions of k8s we are deploying (using kubeadm) have the fix. 
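Editorial note on the Create flow above: with the v1alpha2 migration, clusterctl no longer asks the provider Deployer for the target cluster kubeconfig; it reads it from the "<cluster-name>-kubeconfig" Secret (key "value") that the controllers write into the management cluster, via the GetKubeconfigFromSecret method added earlier in this patch. A minimal sketch of that lookup, assuming a controller-runtime client and an illustrative helper name that is not part of this patch:

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getKubeconfigFromSecret reads the kubeconfig stored under the "value" key
// of the "<clusterName>-kubeconfig" Secret in the given namespace.
// Helper name and client wiring are illustrative, not the patch's own code.
func getKubeconfigFromSecret(ctx context.Context, c client.Client, namespace, clusterName string) (string, error) {
	secret := &corev1.Secret{}
	key := client.ObjectKey{Namespace: namespace, Name: fmt.Sprintf("%s-kubeconfig", clusterName)}
	if err := c.Get(ctx, key, secret); err != nil {
		return "", err
	}
	return string(secret.Data["value"]), nil
}
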
@@ -185,25 +173,6 @@ func (d *ClusterDeployer) Delete(targetClient clusterclient.Client) error { return nil } -func (d *ClusterDeployer) updateClusterEndpoint(client clusterclient.Client, provider provider.Deployer, clusterName, namespace string) error { - // Update cluster endpoint. Needed till this logic moves into cluster controller. - // TODO: https://github.com/kubernetes-sigs/cluster-api/issues/158 - // Fetch fresh objects. - cluster, controlPlane, _, err := clusterclient.GetClusterAPIObject(client, clusterName, namespace) - if err != nil { - return err - } - clusterEndpoint, err := provider.GetIP(cluster, controlPlane) - if err != nil { - return errors.Wrap(err, "unable to get cluster endpoint") - } - err = client.UpdateClusterObjectEndpoint(clusterEndpoint, clusterName, namespace) - if err != nil { - return errors.Wrap(err, "unable to update cluster endpoint") - } - return nil -} - func (d *ClusterDeployer) saveProviderComponentsToCluster(factory provider.ComponentsStoreFactory, kubeconfigPath string) error { clientset, err := d.clientFactory.NewCoreClientsetFromKubeconfigFile(kubeconfigPath) if err != nil { @@ -237,11 +206,6 @@ func deleteClusterAPIObjectsInAllNamespaces(client clusterclient.Client) error { err = errors.Wrap(err, "error deleting Machines") errorList = append(errorList, err.Error()) } - klog.Infof("Deleting MachineClasses in all namespaces") - if err := client.DeleteMachineClasses(""); err != nil { - err = errors.Wrap(err, "error deleting MachineClasses") - errorList = append(errorList, err.Error()) - } klog.Infof("Deleting Clusters in all namespaces") if err := client.DeleteClusters(""); err != nil { err = errors.Wrap(err, "error deleting Clusters") @@ -254,7 +218,9 @@ func deleteClusterAPIObjectsInAllNamespaces(client clusterclient.Client) error { } func closeClient(client clusterclient.Client, name string) { - if err := client.Close(); err != nil { - klog.Errorf("Could not close %v client: %v", name, err) + if client != nil { + if err := client.Close(); err != nil { + klog.Errorf("Could not close %v client: %v", name, err) + } } } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go index 1a9e39e1a2..1a83761011 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer_test.go @@ -26,9 +26,9 @@ import ( apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) type testClusterProvisioner struct { @@ -91,7 +91,7 @@ type stringCheckFunc func(string) error type testClusterClient struct { ApplyErr error DeleteErr error - WaitForClusterV1alpha1ReadyErr error + WaitForClusterV1alpha2ReadyErr error GetClustersErr error GetClusterErr error GetMachineClassesErr error @@ -109,7 +109,6 @@ type testClusterClient struct { DeleteMachineDeploymentsErr error DeleteMachinesErr error DeleteMachineSetsErr error - UpdateClusterObjectEndpointErr error EnsureNamespaceErr error DeleteNamespaceErr error CloseErr error @@ -117,10 +116,10 @@ type testClusterClient struct { ApplyFunc stringCheckFunc clusters map[string][]*clusterv1.Cluster - 
machineClasses map[string][]*clusterv1.MachineClass machineDeployments map[string][]*clusterv1.MachineDeployment machineSets map[string][]*clusterv1.MachineSet machines map[string][]*clusterv1.Machine + secrets []*apiv1.Secret namespaces []string contextNamespace string } @@ -143,8 +142,8 @@ func (c *testClusterClient) GetContextNamespace() string { return c.contextNamespace } -func (c *testClusterClient) WaitForClusterV1alpha1Ready() error { - return c.WaitForClusterV1alpha1ReadyErr +func (c *testClusterClient) WaitForClusterV1alpha2Ready() error { + return c.WaitForClusterV1alpha2ReadyErr } func (c *testClusterClient) GetCluster(clusterName, namespace string) (*clusterv1.Cluster, error) { @@ -273,18 +272,6 @@ func (c *testClusterClient) DeleteClusters(ns string) error { return nil } -func (c *testClusterClient) DeleteMachineClasses(ns string) error { - if c.DeleteMachineClassesErr != nil { - return c.DeleteMachineClassesErr - } - if ns == "" { - c.machineClasses = make(map[string][]*clusterv1.MachineClass) - } else { - delete(c.machineClasses, ns) - } - return nil -} - func (c *testClusterClient) DeleteMachineDeployments(ns string) error { if c.DeleteMachineDeploymentsErr != nil { return c.DeleteMachineDeploymentsErr @@ -321,13 +308,20 @@ func (c *testClusterClient) DeleteMachines(ns string) error { return nil } -func (c *testClusterClient) UpdateClusterObjectEndpoint(string, string, string) error { - return c.UpdateClusterObjectEndpointErr -} func (c *testClusterClient) Close() error { return c.CloseErr } +func (c *testClusterClient) GetKubeconfigFromSecret(namespace, clusterName string) (string, error) { + for _, secret := range c.secrets { + if secret.Namespace == namespace && secret.Name == fmt.Sprintf("%s-kubeconfig", clusterName) { + return string(secret.Data["value"]), nil + } + } + + return "", c.EnsureNamespaceErr +} + func (c *testClusterClient) EnsureNamespace(nsName string) error { if exists := contains(c.namespaces, nsName); !exists { c.namespaces = append(c.namespaces, nsName) @@ -490,21 +484,6 @@ func (c *testClusterClient) WaitForResourceStatuses() error { return nil } -// TODO: implement GetMachineClasses for testClusterClient and add tests -func (c *testClusterClient) GetMachineClasses(namespace string) ([]*clusterv1.MachineClass, error) { - return c.machineClasses[namespace], c.GetMachineClassesErr -} - -// TODO: implement CreateMachineClass for testClusterClient and add tests -func (c *testClusterClient) CreateMachineClass(*clusterv1.MachineClass) error { - return errors.Errorf("CreateMachineClass Not yet implemented.") -} - -// TODO: implement DeleteMachineClass for testClusterClient and add tests -func (c *testClusterClient) DeleteMachineClass(namespace, name string) error { - return errors.Errorf("DeleteMachineClass Not yet implemented.") -} - func contains(s []string, e string) bool { exists := false for _, existingNs := range s { @@ -702,7 +681,7 @@ func TestClusterCreate(t *testing.T) { { name: "fail waiting for api ready on bootstrap cluster", targetClient: &testClusterClient{ApplyFunc: func(yaml string) error { return nil }}, - bootstrapClient: &testClusterClient{WaitForClusterV1alpha1ReadyErr: errors.New("Test failure")}, + bootstrapClient: &testClusterClient{WaitForClusterV1alpha2ReadyErr: errors.New("Test failure")}, namespaceToExpectedInternalMachines: make(map[string]int), namespaceToInputCluster: map[string][]*clusterv1.Cluster{metav1.NamespaceDefault: getClustersForNamespace(metav1.NamespaceDefault, 1)}, cleanupExternal: true, @@ -749,16 +728,6 @@ func 
TestClusterCreate(t *testing.T) { expectExternalCreated: true, expectErr: true, }, - { - name: "fail update bootstrap cluster endpoint", - targetClient: &testClusterClient{ApplyFunc: func(yaml string) error { return nil }}, - bootstrapClient: &testClusterClient{UpdateClusterObjectEndpointErr: errors.New("Test failure")}, - namespaceToExpectedInternalMachines: make(map[string]int), - namespaceToInputCluster: map[string][]*clusterv1.Cluster{metav1.NamespaceDefault: getClustersForNamespace(metav1.NamespaceDefault, 1)}, - cleanupExternal: true, - expectExternalCreated: true, - expectErr: true, - }, { name: "fail apply yaml to target cluster", targetClient: &testClusterClient{ApplyErr: errors.New("Test failure")}, @@ -771,7 +740,7 @@ func TestClusterCreate(t *testing.T) { }, { name: "fail wait for api ready on target cluster", - targetClient: &testClusterClient{WaitForClusterV1alpha1ReadyErr: errors.New("Test failure")}, + targetClient: &testClusterClient{WaitForClusterV1alpha2ReadyErr: errors.New("Test failure")}, bootstrapClient: &testClusterClient{}, namespaceToExpectedInternalMachines: make(map[string]int), namespaceToInputCluster: map[string][]*clusterv1.Cluster{metav1.NamespaceDefault: getClustersForNamespace(metav1.NamespaceDefault, 1)}, @@ -799,16 +768,6 @@ func TestClusterCreate(t *testing.T) { expectExternalCreated: true, expectErr: true, }, - { - name: "fail update cluster endpoint target", - targetClient: &testClusterClient{UpdateClusterObjectEndpointErr: errors.New("Test failure")}, - bootstrapClient: &testClusterClient{}, - namespaceToExpectedInternalMachines: make(map[string]int), - namespaceToInputCluster: map[string][]*clusterv1.Cluster{metav1.NamespaceDefault: getClustersForNamespace(metav1.NamespaceDefault, 1)}, - cleanupExternal: true, - expectExternalCreated: true, - expectErr: true, - }, { name: "success bootstrap_only components not applied to target cluster", targetClient: &testClusterClient{ApplyFunc: func(yaml string) error { @@ -837,8 +796,6 @@ func TestClusterCreate(t *testing.T) { err: testcase.provisionExternalErr, kubeconfig: bootstrapKubeconfig, } - pd := &testProviderDeployer{} - pd.kubeconfig = targetKubeconfig f := newTestClusterClientFactory() f.clusterClients[bootstrapKubeconfig] = testcase.bootstrapClient f.clusterClients[targetKubeconfig] = testcase.targetClient @@ -860,7 +817,20 @@ func TestClusterCreate(t *testing.T) { for _, inputCluster := range inputClusters { inputCluster.Name = fmt.Sprintf("%s-cluster", ns) inputMachines[inputCluster.Name] = generateMachines(inputCluster, ns) - err = d.Create(inputCluster, inputMachines[inputCluster.Name], pd, kubeconfigOut, &pcFactory) + + kubeconfigSecret := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-kubeconfig", inputCluster.Name), + Namespace: ns, + }, + Data: map[string][]byte{ + "value": []byte(targetKubeconfig), + }, + } + testcase.bootstrapClient.secrets = append(testcase.bootstrapClient.secrets, kubeconfigSecret) + testcase.targetClient.secrets = append(testcase.bootstrapClient.secrets, kubeconfigSecret) + + err = d.Create(inputCluster, inputMachines[inputCluster.Name], kubeconfigOut, &pcFactory) if err != nil { break } @@ -945,11 +915,24 @@ func TestCreateProviderComponentsScenarios(t *testing.T) { p := &testClusterProvisioner{ kubeconfig: bootstrapKubeconfig, } - pd := &testProviderDeployer{} - pd.kubeconfig = targetKubeconfig + + kubeconfigSecret := &apiv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-kubeconfig", + Namespace: metav1.NamespaceDefault, + }, + 
Data: map[string][]byte{ + "value": []byte(targetKubeconfig), + }, + } + f := newTestClusterClientFactory() - f.clusterClients[bootstrapKubeconfig] = &testClusterClient{} - f.clusterClients[targetKubeconfig] = &testClusterClient{} + f.clusterClients[bootstrapKubeconfig] = &testClusterClient{ + secrets: []*apiv1.Secret{kubeconfigSecret}, + } + f.clusterClients[targetKubeconfig] = &testClusterClient{ + secrets: []*apiv1.Secret{kubeconfigSecret}, + } inputCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -962,7 +945,7 @@ func TestCreateProviderComponentsScenarios(t *testing.T) { providerComponentsYaml := "---\nyaml: definition" addonsYaml := "---\nyaml: definition" d := New(p, f, providerComponentsYaml, addonsYaml, "", false) - err := d.Create(inputCluster, inputMachines, pd, kubeconfigOut, &pcFactory) + err := d.Create(inputCluster, inputMachines, kubeconfigOut, &pcFactory) if err == nil && tc.expectedError != "" { t.Fatalf("error mismatch: got '%v', want '%v'", err, tc.expectedError) } @@ -1376,9 +1359,6 @@ func TestClusterDelete(t *testing.T) { for _, machineSets := range testCase.bootstrapClient.machineSets { bootstrapMachineSets = bootstrapMachineSets + len(machineSets) } - for _, machineClasses := range testCase.bootstrapClient.machineClasses { - bootstrapMachineClasses = bootstrapMachineClasses + len(machineClasses) - } for _, clusters := range testCase.targetClient.clusters { targetClusters = targetClusters + len(clusters) } @@ -1391,9 +1371,6 @@ func TestClusterDelete(t *testing.T) { for _, machineSets := range testCase.targetClient.machineSets { targetMachineSets = targetMachineSets + len(machineSets) } - for _, machineClasses := range testCase.targetClient.machineClasses { - targetMachineClasses = targetMachineClasses + len(machineClasses) - } if bootstrapClusters != 0 { t.Fatalf("Unexpected Cluster count in bootstrap cluster. 
Got: %d, Want: 0", bootstrapClusters) @@ -1434,10 +1411,8 @@ func generateTestControlPlaneMachines(cluster *clusterv1.Cluster, ns string, nam machines := make([]*clusterv1.Machine, 0, len(names)) for _, name := range names { machine := generateTestNodeMachine(cluster, ns, name) - machine.Spec = clusterv1.MachineSpec{ - Versions: clusterv1.MachineVersionInfo{ - ControlPlane: "1.10.1", - }, + machine.ObjectMeta.Labels = map[string]string{ + clusterv1.MachineControlPlaneLabelName: "true", } machines = append(machines, machine) } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/BUILD.bazel index b6db884d1e..94f460abd9 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/BUILD.bazel @@ -5,8 +5,5 @@ go_library( srcs = ["provider.go"], importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider", visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], + deps = ["//vendor/k8s.io/client-go/kubernetes:go_default_library"], ) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/provider.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/provider.go index 060f198582..c3e4720d47 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/provider.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider/provider.go @@ -18,18 +18,8 @@ package provider import ( "k8s.io/client-go/kubernetes" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) -// Deployer is a deprecated interface for Provider specific logic. Please do not extend or add. This interface should be removed -// once issues/158 and issues/160 below are fixed. 
-type Deployer interface { - // TODO: This requirement can be removed once after: https://github.com/kubernetes-sigs/cluster-api/issues/158 - GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) - // TODO: This requirement can be removed after: https://github.com/kubernetes-sigs/cluster-api/issues/160 - GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) (string, error) -} - // ComponentsStore is an interface for saving and loading Provider Components type ComponentsStore interface { Save(providerComponents string) error diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/BUILD.bazel index 48fc8fc2d9..b28b7d64e2 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/BUILD.bazel @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", @@ -25,16 +25,14 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd", visibility = ["//visibility:public"], deps = [ + "//api/v1alpha2:go_default_library", "//cmd/clusterctl/clientcmd:go_default_library", "//cmd/clusterctl/clusterdeployer:go_default_library", "//cmd/clusterctl/clusterdeployer/bootstrap:go_default_library", "//cmd/clusterctl/clusterdeployer/clusterclient:go_default_library", - "//cmd/clusterctl/clusterdeployer/provider:go_default_library", "//cmd/clusterctl/phases:go_default_library", "//cmd/clusterctl/providercomponents:go_default_library", "//cmd/clusterctl/validation:go_default_library", - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/common:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", @@ -50,9 +48,3 @@ go_library( "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", ], ) - -go_test( - name = "go_default_test", - srcs = ["create_cluster_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/alpha_phase_get_kubeconfig.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/alpha_phase_get_kubeconfig.go index 5dac0147bc..69fe656ce1 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/alpha_phase_get_kubeconfig.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/alpha_phase_get_kubeconfig.go @@ -71,12 +71,7 @@ func RunAlphaPhaseGetKubeconfig(pgko *AlphaPhaseGetKubeconfigOptions) error { return fmt.Errorf("unable to create cluster client: %v", err) } - provider, err := getProvider(pgko.Provider) - if err != nil { - return err - } - - if _, err := phases.GetKubeconfig(client, provider, pgko.KubeconfigOutput, pgko.ClusterName, pgko.Namespace); err != nil { + if _, err := phases.GetKubeconfig(client, pgko.KubeconfigOutput, pgko.ClusterName, pgko.Namespace); err != nil { return fmt.Errorf("unable to get kubeconfig: %v", err) } @@ -87,8 +82,6 @@ func init() { // Required flags alphaPhaseGetKubeconfigCmd.Flags().StringVarP(&pgko.Kubeconfig, "kubeconfig", "", "", "Path for the kubeconfig file to use") alphaPhaseGetKubeconfigCmd.Flags().StringVarP(&pgko.ClusterName, "cluster-name", "", "", "Cluster Name") - // TODO: Remove as soon as code allows https://github.com/kubernetes-sigs/cluster-api/issues/157 - alphaPhaseGetKubeconfigCmd.Flags().StringVarP(&pgko.Provider, "provider", "", "", "Which provider deployment 
logic to use") // Optional flags alphaPhaseGetKubeconfigCmd.Flags().StringVarP(&pgko.KubeconfigOutput, "kubeconfig-out", "", "kubeconfig", "Where to output the kubeconfig for the provisioned cluster") diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster.go index a46bd4ef48..082d17691d 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster.go @@ -25,8 +25,6 @@ import ( "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/bootstrap" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient" - "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider" - clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" "sigs.k8s.io/cluster-api/pkg/util" ) @@ -78,10 +76,6 @@ func RunCreate(co *CreateOptions) error { return err } - pd, err := getProvider(co.Provider) - if err != nil { - return err - } pc, err := ioutil.ReadFile(co.ProviderComponents) if err != nil { return errors.Wrapf(err, "error loading provider components file %q", co.ProviderComponents) @@ -109,7 +103,7 @@ func RunCreate(co *CreateOptions) error { string(bc), co.BootstrapFlags.Cleanup) - return d.Create(c, m, pd, co.KubeconfigOutput, pcsFactory) + return d.Create(c, m, co.KubeconfigOutput, pcsFactory) } func init() { @@ -120,9 +114,6 @@ func init() { createClusterCmd.MarkFlagRequired("machines") createClusterCmd.Flags().StringVarP(&co.ProviderComponents, "provider-components", "p", "", "A yaml file containing cluster api provider controllers and supporting objects. Required.") createClusterCmd.MarkFlagRequired("provider-components") - // TODO: Remove as soon as code allows https://github.com/kubernetes-sigs/cluster-api/issues/157 - createClusterCmd.Flags().StringVarP(&co.Provider, "provider", "", "", "Which provider deployment logic to use. Required.") - createClusterCmd.MarkFlagRequired("provider") // Optional flags createClusterCmd.Flags().StringVarP(&co.AddonComponents, "addon-components", "a", "", "A yaml file containing cluster addons to apply to the internal cluster") @@ -132,15 +123,3 @@ func init() { co.BootstrapFlags.AddFlags(createClusterCmd.Flags()) createCmd.AddCommand(createClusterCmd) } - -func getProvider(name string) (provider.Deployer, error) { - provisioner, err := clustercommon.ClusterProvisioner(name) - if err != nil { - return nil, err - } - provider, ok := provisioner.(provider.Deployer) - if !ok { - return nil, errors.Errorf("provider for %s does not implement provider.Deployer interface", name) - } - return provider, nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster_test.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster_test.go deleted file mode 100644 index dd2142cba6..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/create_cluster_test.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "testing" -) - -func TestGetProvider(t *testing.T) { - var testcases = []struct { - provider string - expectErr bool - }{ - { - provider: "blah blah", - expectErr: true, - }, - } - for _, testcase := range testcases { - t.Run(testcase.provider, func(t *testing.T) { - _, err := getProvider(testcase.provider) - if (testcase.expectErr && err == nil) || (!testcase.expectErr && err != nil) { - t.Fatalf("Unexpected returned error. Got: %v, Want Err: %v", err, testcase.expectErr) - } - }) - } -} diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/validate_cluster.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/validate_cluster.go index 10b5358a43..66657eb2aa 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/validate_cluster.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/cmd/validate_cluster.go @@ -25,8 +25,8 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tcmd "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/cmd/clusterctl/validation" - "sigs.k8s.io/cluster-api/pkg/apis" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -67,7 +67,7 @@ func RunValidateCluster() error { return errors.Wrap(err, "failed to create manager") } // Setup Scheme for all resources - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { + if err := v1alpha2.AddToScheme(mgr.GetScheme()); err != nil { return errors.Wrap(err, "failed to add APIs to manager") } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/main.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/main.go index fccad5e92b..3df6b181da 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/main.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/main.go @@ -16,8 +16,13 @@ limitations under the License. 
package main -import "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd" +import ( + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd" +) func main() { + v1alpha2.AddToScheme(scheme.Scheme) cmd.Execute() } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/BUILD.bazel index 716245bf44..bc6056ec97 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/BUILD.bazel @@ -15,10 +15,9 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/phases", visibility = ["//visibility:public"], deps = [ + "//api/v1alpha2:go_default_library", "//cmd/clusterctl/clusterdeployer/bootstrap:go_default_library", "//cmd/clusterctl/clusterdeployer/clusterclient:go_default_library", - "//cmd/clusterctl/clusterdeployer/provider:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", @@ -32,7 +31,7 @@ go_test( srcs = ["pivot_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applycluster.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applycluster.go index 08a41bfc6f..211c535b99 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applycluster.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applycluster.go @@ -19,8 +19,8 @@ package phases import ( "github.com/pkg/errors" "k8s.io/klog" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) func ApplyCluster(client clusterclient.Client, cluster *clusterv1.Cluster) error { diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applyclusterapicomponents.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applyclusterapicomponents.go index e9bcf3e807..a901e389fb 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applyclusterapicomponents.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applyclusterapicomponents.go @@ -28,5 +28,5 @@ func ApplyClusterAPIComponents(client clusterclient.Client, providerComponents s return errors.Wrap(err, "unable to apply cluster api controllers") } - return client.WaitForClusterV1alpha1Ready() + return client.WaitForClusterV1alpha2Ready() } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applymachines.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applymachines.go index ffc8f1af14..4dadc4980e 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applymachines.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/applymachines.go @@ -19,8 +19,8 @@ package phases import ( "github.com/pkg/errors" "k8s.io/klog" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) func ApplyMachines(client clusterclient.Client, namespace string, machines []*clusterv1.Machine) error { diff --git 
a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/getkubeconfig.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/getkubeconfig.go index 47589b004f..623d7c23a2 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/getkubeconfig.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/getkubeconfig.go @@ -25,7 +25,6 @@ import ( "k8s.io/klog" "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterclient" - "sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/provider" "sigs.k8s.io/cluster-api/pkg/util" ) @@ -36,9 +35,9 @@ const ( ) // GetKubeconfig returns a kubeconfig for the target cluster -func GetKubeconfig(bootstrapClient clusterclient.Client, provider provider.Deployer, kubeconfigOutput string, clusterName, namespace string) (string, error) { +func GetKubeconfig(bootstrapClient clusterclient.Client, kubeconfigOutput string, clusterName, namespace string) (string, error) { klog.V(1).Info("Getting target cluster kubeconfig.") - targetKubeconfig, err := waitForKubeconfigReady(bootstrapClient, provider, clusterName, namespace) + targetKubeconfig, err := waitForKubeconfigReady(bootstrapClient, clusterName, namespace) if err != nil { return "", fmt.Errorf("unable to get target cluster kubeconfig: %v", err) } @@ -50,30 +49,26 @@ func GetKubeconfig(bootstrapClient clusterclient.Client, provider provider.Deplo return targetKubeconfig, nil } -func waitForKubeconfigReady(bootstrapClient clusterclient.Client, provider provider.Deployer, clusterName, namespace string) (string, error) { +func waitForKubeconfigReady(bootstrapClient clusterclient.Client, clusterName, namespace string) (string, error) { timeout := defaultTimeoutKubeconfigReady if v := os.Getenv(TimeoutMachineReadyEnv); v != "" { t, err := strconv.Atoi(v) if err == nil { timeout = time.Duration(t) * time.Minute - klog.V(4).Infof("Setting KubeConfg timeout to %v", timeout) + klog.V(4).Infof("Setting KubeConfig timeout to %v", timeout) } } kubeconfig := "" err := util.PollImmediate(retryKubeConfigReady, timeout, func() (bool, error) { - cluster, controlPlane, _, err := clusterclient.GetClusterAPIObject(bootstrapClient, clusterName, namespace) - if err != nil { - return false, err - } - - klog.V(2).Infof("Waiting for kubeconfig on %v to become ready...", controlPlane.Name) - k, err := provider.GetKubeConfig(cluster, controlPlane) + klog.V(2).Infof("Waiting for kubeconfig from Secret %q-kubeconfig in namespace %q to become available...", clusterName, namespace) + k, err := bootstrapClient.GetKubeconfigFromSecret(namespace, clusterName) if err != nil { klog.V(4).Infof("error getting kubeconfig: %v", err) return false, nil } if k == "" { + klog.V(4).Info("error getting kubeconfig: secret value is empty") return false, nil } kubeconfig = k diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot.go index d83ee6bd42..d549bb57fc 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot.go @@ -24,18 +24,16 @@ import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/klog" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" ) type sourceClient interface { Delete(string) error - DeleteMachineClass(namespace, name string) error ForceDeleteCluster(string, string) error ForceDeleteMachine(string, string) error ForceDeleteMachineDeployment(string, string) error 
ForceDeleteMachineSet(namespace, name string) error GetClusters(string) ([]*clusterv1.Cluster, error) - GetMachineClasses(string) ([]*clusterv1.MachineClass, error) GetMachineDeployments(string) ([]*clusterv1.MachineDeployment, error) GetMachineDeploymentsForCluster(*clusterv1.Cluster) ([]*clusterv1.MachineDeployment, error) GetMachines(namespace string) ([]*clusterv1.Machine, error) @@ -45,20 +43,19 @@ type sourceClient interface { GetMachinesForCluster(*clusterv1.Cluster) ([]*clusterv1.Machine, error) GetMachinesForMachineSet(*clusterv1.MachineSet) ([]*clusterv1.Machine, error) ScaleStatefulSet(string, string, int32) error - WaitForClusterV1alpha1Ready() error + WaitForClusterV1alpha2Ready() error } type targetClient interface { Apply(string) error CreateClusterObject(*clusterv1.Cluster) error - CreateMachineClass(*clusterv1.MachineClass) error CreateMachineDeployments([]*clusterv1.MachineDeployment, string) error CreateMachines([]*clusterv1.Machine, string) error CreateMachineSets([]*clusterv1.MachineSet, string) error EnsureNamespace(string) error GetMachineDeployment(namespace, name string) (*clusterv1.MachineDeployment, error) GetMachineSet(string, string) (*clusterv1.MachineSet, error) - WaitForClusterV1alpha1Ready() error + WaitForClusterV1alpha2Ready() error } // Pivot deploys the provided provider components to a target cluster and then migrates @@ -80,14 +77,14 @@ func Pivot(source sourceClient, target targetClient, providerComponents string) func pivot(from sourceClient, to targetClient, providerComponents string) error { // TODO: Attempt to handle rollback in case of pivot failure - klog.V(4).Info("Ensuring cluster v1alpha1 resources are available on the source cluster") - if err := from.WaitForClusterV1alpha1Ready(); err != nil { - return errors.New("cluster v1alpha1 resource not ready on source cluster") + klog.V(4).Info("Ensuring cluster v1alpha2 resources are available on the source cluster") + if err := from.WaitForClusterV1alpha2Ready(); err != nil { + return errors.New("cluster v1alpha2 resource not ready on source cluster") } - klog.V(4).Info("Ensuring cluster v1alpha1 resources are available on the target cluster") - if err := to.WaitForClusterV1alpha1Ready(); err != nil { - return errors.New("cluster v1alpha1 resource not ready on target cluster") + klog.V(4).Info("Ensuring cluster v1alpha2 resources are available on the target cluster") + if err := to.WaitForClusterV1alpha2Ready(); err != nil { + return errors.New("cluster v1alpha2 resource not ready on target cluster") } klog.V(4).Info("Parsing list of cluster-api controllers from provider components") @@ -104,16 +101,6 @@ func pivot(from sourceClient, to targetClient, providerComponents string) error } } - klog.V(4).Info("Retrieving list of MachineClasses to move") - machineClasses, err := from.GetMachineClasses("") - if err != nil { - return err - } - - if err := copyMachineClasses(from, to, machineClasses); err != nil { - return err - } - klog.V(4).Info("Retrieving list of Clusters to move") clusters, err := from.GetClusters("") if err != nil { @@ -151,10 +138,6 @@ func pivot(from sourceClient, to targetClient, providerComponents string) error return err } - if err := deleteMachineClasses(from, machineClasses); err != nil { - return err - } - klog.V(4).Infof("Deleting provider components from source cluster") if err := from.Delete(providerComponents); err != nil { klog.Warningf("Could not delete the provider components from the source cluster: %v", err) @@ -178,58 +161,6 @@ func moveClusters(from sourceClient, 
to targetClient, clusters []*clusterv1.Clus return nil } -func deleteMachineClasses(client sourceClient, machineClasses []*clusterv1.MachineClass) error { - machineClassNames := make([]string, 0, len(machineClasses)) - for _, mc := range machineClasses { - machineClassNames = append(machineClassNames, mc.Name) - } - klog.V(4).Infof("Preparing to delete MachineClasses: %v", machineClassNames) - - for _, mc := range machineClasses { - if err := deleteMachineClass(client, mc); err != nil { - return errors.Wrapf(err, "failed to delete MachineClass %s:%s", mc.Namespace, mc.Name) - } - } - return nil -} - -func deleteMachineClass(client sourceClient, machineClass *clusterv1.MachineClass) error { - // New objects cannot have a specified resource version. Clear it out. - machineClass.SetResourceVersion("") - if err := client.DeleteMachineClass(machineClass.Namespace, machineClass.Name); err != nil { - return errors.Wrapf(err, "error deleting MachineClass %s/%s from source cluster", machineClass.Namespace, machineClass.Name) - } - - klog.V(4).Infof("Successfully deleted MachineClass %s/%s from source cluster", machineClass.Namespace, machineClass.Name) - return nil -} - -func copyMachineClasses(from sourceClient, to targetClient, machineClasses []*clusterv1.MachineClass) error { - machineClassNames := make([]string, 0, len(machineClasses)) - for _, mc := range machineClasses { - machineClassNames = append(machineClassNames, mc.Name) - } - klog.V(4).Infof("Preparing to copy MachineClasses: %v", machineClassNames) - - for _, mc := range machineClasses { - if err := copyMachineClass(from, to, mc); err != nil { - return errors.Wrapf(err, "failed to copy MachineClass %s:%s", mc.Namespace, mc.Name) - } - } - return nil -} - -func copyMachineClass(from sourceClient, to targetClient, machineClass *clusterv1.MachineClass) error { - // New objects cannot have a specified resource version. Clear it out. 
- machineClass.SetResourceVersion("") - if err := to.CreateMachineClass(machineClass); err != nil { - return errors.Wrapf(err, "error copying MachineClass %s/%s to target cluster", machineClass.Namespace, machineClass.Name) - } - - klog.V(4).Infof("Successfully copied MachineClass %s/%s", machineClass.Namespace, machineClass.Name) - return nil -} - func moveCluster(from sourceClient, to targetClient, cluster *clusterv1.Cluster) error { klog.V(4).Infof("Moving Cluster %s/%s", cluster.Namespace, cluster.Name) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot_test.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot_test.go index 2c7dc5a1ce..c6f80fe02f 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot_test.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/phases/pivot_test.go @@ -22,7 +22,7 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" ) // sourcer map keys are namespaces @@ -32,7 +32,6 @@ type sourcer struct { machineDeployments map[string][]*clusterv1.MachineDeployment machineSets map[string][]*clusterv1.MachineSet machines map[string][]*clusterv1.Machine - machineClasses map[string][]*clusterv1.MachineClass } func newSourcer() *sourcer { @@ -41,7 +40,6 @@ func newSourcer() *sourcer { machineDeployments: make(map[string][]*clusterv1.MachineDeployment), machineSets: make(map[string][]*clusterv1.MachineSet), machines: make(map[string][]*clusterv1.Machine), - machineClasses: make(map[string][]*clusterv1.MachineClass), } } func (s *sourcer) WithCluster(ns, name string) *sourcer { @@ -67,7 +65,7 @@ func (s *sourcer) WithMachineDeployment(ns, cluster, name string) *sourcer { blockOwnerDeletion := true md.OwnerReferences = []metav1.OwnerReference{ { - APIVersion: clusterv1.SchemeGroupVersion.Version, + APIVersion: clusterv1.GroupVersion.Version, Kind: "Cluster", Name: cluster, BlockOwnerDeletion: &blockOwnerDeletion, @@ -90,7 +88,7 @@ func (s *sourcer) WithMachineSet(ns, cluster, md, name string) *sourcer { blockOwnerDeletion := true ms.OwnerReferences = []metav1.OwnerReference{ { - APIVersion: clusterv1.SchemeGroupVersion.Version, + APIVersion: clusterv1.GroupVersion.Version, Kind: "Cluster", Name: cluster, BlockOwnerDeletion: &blockOwnerDeletion, @@ -101,7 +99,7 @@ func (s *sourcer) WithMachineSet(ns, cluster, md, name string) *sourcer { isController := true ms.OwnerReferences = []metav1.OwnerReference{ { - APIVersion: clusterv1.SchemeGroupVersion.Version, + APIVersion: clusterv1.GroupVersion.Version, Kind: "MachineDeployment", Name: md, Controller: &isController, @@ -125,7 +123,7 @@ func (s *sourcer) WithMachine(ns, cluster, ms, name string) *sourcer { blockOwnerDeletion := true m.OwnerReferences = []metav1.OwnerReference{ { - APIVersion: clusterv1.SchemeGroupVersion.Version, + APIVersion: clusterv1.GroupVersion.Version, Kind: "Cluster", Name: cluster, BlockOwnerDeletion: &blockOwnerDeletion, @@ -136,7 +134,7 @@ func (s *sourcer) WithMachine(ns, cluster, ms, name string) *sourcer { isController := true m.OwnerReferences = []metav1.OwnerReference{ { - APIVersion: clusterv1.SchemeGroupVersion.Version, + APIVersion: clusterv1.GroupVersion.Version, Kind: "MachineSet", Name: ms, Controller: &isController, @@ -148,31 +146,11 @@ func (s *sourcer) WithMachine(ns, cluster, ms, name string) *sourcer { return s } -func (s *sourcer) WithMachineClass(ns, name string) *sourcer { - s.machineClasses[ns] = 
append(s.machineClasses[ns], &clusterv1.MachineClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - }) - return s -} - // Interface implementation below func (s *sourcer) Delete(string) error { return nil } -func (s *sourcer) DeleteMachineClass(ns, name string) error { - newMachineClasses := []*clusterv1.MachineClass{} - for _, mc := range s.machineClasses[ns] { - if mc.Name != name { - newMachineClasses = append(newMachineClasses, mc) - } - } - s.machineClasses[ns] = newMachineClasses - return nil -} func (s *sourcer) ForceDeleteCluster(ns, name string) error { newClusters := []*clusterv1.Cluster{} for _, d := range s.clusters[ns] { @@ -226,19 +204,7 @@ func (s *sourcer) GetClusters(ns string) ([]*clusterv1.Cluster, error) { } return s.clusters[ns], nil } -func (s *sourcer) GetMachineClasses(ns string) ([]*clusterv1.MachineClass, error) { - // empty ns implies all namespaces - if ns == "" { - out := []*clusterv1.MachineClass{} - for _, mcs := range s.machineClasses { - for _, mc := range mcs { - out = append(out, mc) - } - } - return out, nil - } - return s.machineClasses[ns], nil -} + func (s *sourcer) GetMachineDeployments(ns string) ([]*clusterv1.MachineDeployment, error) { // empty ns implies all namespaces if ns == "" { @@ -335,7 +301,7 @@ func (s *sourcer) GetMachinesForMachineSet(ms *clusterv1.MachineSet) ([]*cluster func (s *sourcer) ScaleStatefulSet(string, string, int32) error { return nil } -func (s *sourcer) WaitForClusterV1alpha1Ready() error { +func (s *sourcer) WaitForClusterV1alpha2Ready() error { return nil } @@ -344,7 +310,6 @@ type target struct { machineDeployments map[string][]*clusterv1.MachineDeployment machineSets map[string][]*clusterv1.MachineSet machines map[string][]*clusterv1.Machine - machineClasses map[string][]*clusterv1.MachineClass } func (t *target) Apply(string) error { @@ -354,10 +319,6 @@ func (t *target) CreateClusterObject(c *clusterv1.Cluster) error { t.clusters[c.Namespace] = append(t.clusters[c.Namespace], c) return nil } -func (t *target) CreateMachineClass(mc *clusterv1.MachineClass) error { - t.machineClasses[mc.Namespace] = append(t.machineClasses[mc.Namespace], mc) - return nil -} func (t *target) CreateMachineDeployments(deployments []*clusterv1.MachineDeployment, ns string) error { t.machineDeployments[ns] = append(t.machineDeployments[ns], deployments...) return nil @@ -390,7 +351,7 @@ func (t *target) GetMachineSet(ns, name string) (*clusterv1.MachineSet, error) { } return nil, fmt.Errorf("no machineset found with name %q in namespace %q", ns, name) } -func (t *target) WaitForClusterV1alpha1Ready() error { +func (t *target) WaitForClusterV1alpha2Ready() error { return nil } @@ -431,7 +392,6 @@ func TestPivot(t *testing.T) { WithMachineSet(ns1, "cluster1", "", "machineset3"). WithMachine(ns1, "cluster1", "machineset3", "machine4"). WithMachine(ns1, "cluster1", "", "machine5"). - WithMachineClass(ns1, "my-machine-class"). WithMachineDeployment(ns1, "", "deployment2"). WithMachineSet(ns1, "", "deployment2", "machineset4"). WithMachine(ns1, "", "machineset4", "machine6"). 
@@ -444,14 +404,12 @@ func TestPivot(t *testing.T) { expectedMachineDeployments := len(source.machineDeployments[ns1]) + len(source.machineDeployments[ns2]) expectedMachineSets := len(source.machineSets[ns1]) + len(source.machineSets[ns2]) expectedMachines := len(source.machines[ns1]) + len(source.machines[ns2]) - expectedMachineClasses := len(source.machineClasses[ns1]) + len(source.machineClasses[ns2]) target := &target{ clusters: make(map[string][]*clusterv1.Cluster), machineDeployments: make(map[string][]*clusterv1.MachineDeployment), machineSets: make(map[string][]*clusterv1.MachineSet), machines: make(map[string][]*clusterv1.Machine), - machineClasses: make(map[string][]*clusterv1.MachineClass), } if err := Pivot(source, target, pc.String()); err != nil { @@ -478,11 +436,6 @@ func TestPivot(t *testing.T) { t.Logf("target: %v", target.machines) t.Fatal("should have deleted all machines from source k8s cluster") } - if len(source.machineClasses[ns1])+len(source.machineClasses[ns2]) != 0 { - t.Logf("source: %v", source.machineClasses) - t.Logf("target: %v", target.machineClasses) - t.Fatal("should have deleted all machine classes from source k8s cluster") - } if len(target.clusters[ns1])+len(target.clusters[ns2]) != expectedClusters { t.Logf("source: %v", source.clusters) @@ -514,11 +467,6 @@ func TestPivot(t *testing.T) { } t.Fatal("expected machines to pivot") } - if len(target.machineClasses[ns1])+len(target.machineClasses[ns2]) != expectedMachineClasses { - t.Logf("source: %v", source.machineClasses) - t.Logf("target: %v", target.machineClasses) - t.Fatal("expected machine classes to pivot") - } } // An example of testing a failure scenario @@ -528,10 +476,10 @@ type waitFailSourcer struct { *sourcer } -func (s *waitFailSourcer) WaitForClusterV1alpha1Ready() error { +func (s *waitFailSourcer) WaitForClusterV1alpha2Ready() error { return errors.New("failed to wait for ready cluster resources") } -func TestWaitForV1alpha1Failure(t *testing.T) { +func TestWaitForV1alpha2Failure(t *testing.T) { w := &waitFailSourcer{ newSourcer(), diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/BUILD.bazel index a701ef8434..e3024cbdb4 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/BUILD.bazel @@ -20,7 +20,6 @@ go_test( srcs = ["providercomponents_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/providercomponents_test.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/providercomponents_test.go index 9a3569ca8e..e642305b38 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/providercomponents_test.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents/providercomponents_test.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" "sigs.k8s.io/cluster-api/cmd/clusterctl/providercomponents" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" ) func TestLoadFromConfigMap(t *testing.T) { @@ -73,7 +72,7 @@ func TestSaveToConfigMap(t *testing.T) { providerComponentsContent := "content\nmore 
content >>" configMapName := "clusterctl" providerComponentsKey := "provider-components" - notFoundErr := apierrors.NewNotFound(v1alpha1.Resource("configmap"), configMapName) + notFoundErr := apierrors.NewNotFound(core.Resource("configmap"), configMapName) testCases := []struct { name string getResult *core.ConfigMap diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden index ee7a56a873..5381521467 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args-invalid-flag.golden @@ -13,7 +13,6 @@ Flags: -h, --help help for cluster --kubeconfig-out string Where to output the kubeconfig for the provisioned cluster (default "kubeconfig") -m, --machines string A yaml file containing machine object definition(s). Required. - --provider string Which provider deployment logic to use. Required. -p, --provider-components string A yaml file containing cluster api provider controllers and supporting objects. Required. Global Flags: @@ -25,7 +24,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden index c6b350a274..0d49284dd4 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-cluster-no-args.golden @@ -1,4 +1,4 @@ -Error: required flag(s) "cluster", "machines", "provider", "provider-components" not set +Error: required flag(s) "cluster", "machines", "provider-components" not set Usage: clusterctl create cluster [flags] @@ -13,7 +13,6 @@ Flags: -h, --help help for cluster --kubeconfig-out string Where to output the kubeconfig for the provisioned cluster (default "kubeconfig") -m, --machines string A yaml file containing machine object definition(s). Required. - --provider string Which provider deployment logic to use. Required. -p, --provider-components string A yaml file containing cluster api provider controllers and supporting objects. Required. Global Flags: @@ -25,11 +24,11 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) -v, --v Level number for the log level verbosity --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging -required flag(s) "cluster", "machines", "provider", "provider-components" not set +required flag(s) "cluster", "machines", "provider-components" not set diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden index 626fd1196c..04f17efe6e 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args-invalid-flag.golden @@ -17,7 +17,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args.golden index ad51185112..91c575ff4f 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/create-no-args.golden @@ -18,7 +18,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. 
--skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden index 153050588b..fba3da4031 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args-invalid-flag.golden @@ -22,7 +22,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden index 91edb26dc3..49826ffbe8 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-cluster-no-args.golden @@ -24,7 +24,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden index 9344b377cd..af69192e5b 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args-invalid-flag.golden @@ -17,7 +17,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden index f914826e22..503824c28c 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/delete-no-args.golden @@ -18,7 +18,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden index cd0b38b4c8..134d0598f8 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args-invalid-flag.golden @@ -20,7 +20,7 @@ Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. 
--skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args.golden index 812f19afdb..c4cd3a0be1 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/no-args.golden @@ -21,7 +21,7 @@ Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden index 7dc93adbab..36384be8e8 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-cluster-no-args-invalid-flag.golden @@ -17,7 +17,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden index b0dd25c4f1..a0f367a5f0 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args-invalid-flag.golden @@ -17,7 +17,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden index b053749dca..d8ee3a1b05 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/testdata/validate-no-args.golden @@ -18,7 +18,7 @@ Global Flags: --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) --logtostderr log to standard error instead of files (default true) - --master string The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. + --master --kubeconfig (Deprecated: switch to --kubeconfig) The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster. --skip-headers If true, avoid header prefixes in the log messages --skip-log-headers If true, avoid headers when opening log files --stderrthreshold severity logs at or above this threshold go to stderr (default 2) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/BUILD.bazel index 20bc2c589f..312a423150 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/BUILD.bazel @@ -9,9 +9,9 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/clusterctl/validation", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//pkg/controller/noderefutil:go_default_library", + "//pkg/errors:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -29,15 +29,14 @@ go_test( data = glob(["testdata/**"]), embed = [":go_default_library"], deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/apis/cluster/v1alpha1/testutil:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", 
"//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects.go index b3f49f2b30..afe9c53b3e 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects.go @@ -24,10 +24,9 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -42,23 +41,23 @@ func ValidateClusterAPIObjects(ctx context.Context, w io.Writer, c client.Client return err } - machines := &clusterv1alpha1.MachineList{} - if err := c.List(ctx, client.InNamespace(namespace), machines); err != nil { + machines := &v1alpha2.MachineList{} + if err := c.List(ctx, machines, client.InNamespace(namespace)); err != nil { return errors.Wrapf(err, "failed to get the machines from the apiserver in namespace %q", namespace) } return validateMachineObjects(ctx, w, machines, c) } -func getClusterObject(ctx context.Context, c client.Reader, clusterName string, namespace string) (*v1alpha1.Cluster, error) { +func getClusterObject(ctx context.Context, c client.Reader, clusterName string, namespace string) (*v1alpha2.Cluster, error) { if clusterName != "" { - cluster := &clusterv1alpha1.Cluster{} + cluster := &v1alpha2.Cluster{} err := c.Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace}, cluster) return cluster, err } - clusters := &clusterv1alpha1.ClusterList{} - if err := c.List(ctx, &client.ListOptions{Namespace: namespace}, clusters); err != nil { + clusters := &v1alpha2.ClusterList{} + if err := c.List(ctx, clusters, client.InNamespace(namespace)); err != nil { return nil, errors.Wrapf(err, "failed to get the clusters from the apiserver in namespace %q", namespace) } @@ -67,21 +66,30 @@ func getClusterObject(ctx context.Context, c client.Reader, clusterName string, } else if numOfClusters > 1 { return nil, errors.Errorf("fail: There is more than one cluster in namespace %q. Please specify --cluster-name", namespace) } + return &clusters.Items[0], nil } -func validateClusterObject(w io.Writer, cluster *v1alpha1.Cluster) error { +func validateClusterObject(w io.Writer, cluster *v1alpha2.Cluster) error { fmt.Fprintf(w, "Checking cluster object %q... 
", cluster.Name) - if cluster.Status.ErrorReason != "" || cluster.Status.ErrorMessage != "" { + if cluster.Status.ErrorReason != nil || cluster.Status.ErrorMessage != nil { + var reason capierrors.ClusterStatusError + if cluster.Status.ErrorReason != nil { + reason = *cluster.Status.ErrorReason + } + var message string + if cluster.Status.ErrorMessage != nil { + message = *cluster.Status.ErrorMessage + } fmt.Fprintf(w, "FAIL\n") - fmt.Fprintf(w, "\t[%v]: %s\n", cluster.Status.ErrorReason, cluster.Status.ErrorMessage) + fmt.Fprintf(w, "\t[%v]: %s\n", reason, message) return errors.Errorf("cluster %q failed the validation", cluster.Name) } fmt.Fprintf(w, "PASS\n") return nil } -func validateMachineObjects(ctx context.Context, w io.Writer, machines *v1alpha1.MachineList, client client.Client) error { +func validateMachineObjects(ctx context.Context, w io.Writer, machines *v1alpha2.MachineList, client client.Client) error { pass := true for _, machine := range machines.Items { if !validateMachineObject(ctx, w, machine, client) { @@ -94,10 +102,10 @@ func validateMachineObjects(ctx context.Context, w io.Writer, machines *v1alpha1 return nil } -func validateMachineObject(ctx context.Context, w io.Writer, machine v1alpha1.Machine, client client.Client) bool { +func validateMachineObject(ctx context.Context, w io.Writer, machine v1alpha2.Machine, client client.Client) bool { fmt.Fprintf(w, "Checking machine object %q... ", machine.Name) if machine.Status.ErrorReason != nil || machine.Status.ErrorMessage != nil { - var reason common.MachineStatusError + var reason capierrors.MachineStatusError if machine.Status.ErrorReason != nil { reason = *machine.Status.ErrorReason } diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_suite_test.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_suite_test.go index bfbb39134a..1bd4de7263 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_suite_test.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_suite_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" ) @@ -33,9 +33,9 @@ var cfg *rest.Config func TestMain(m *testing.M) { t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, } - apis.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) var err error if cfg, err = t.Start(); err != nil { diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_test.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_test.go index 3d18b89907..7f853b572d 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_test.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_cluster_api_objects_test.go @@ -23,39 +23,47 @@ import ( "path" "testing" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - 
"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/api/v1alpha2" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" ) var c client.Client -func newClusterStatus(errorReason common.ClusterStatusError, errorMessage string) v1alpha1.ClusterStatus { - return v1alpha1.ClusterStatus{ +func newClusterStatus(errorReason *capierrors.ClusterStatusError, errorMessage *string) v1alpha2.ClusterStatus { + return v1alpha2.ClusterStatus{ ErrorReason: errorReason, ErrorMessage: errorMessage, } } -func newMachineStatus(nodeRef *v1.ObjectReference, errorReason *common.MachineStatusError, errorMessage *string) v1alpha1.MachineStatus { - return v1alpha1.MachineStatus{ +func newMachineStatus(nodeRef *v1.ObjectReference, errorReason *capierrors.MachineStatusError, errorMessage *string) v1alpha2.MachineStatus { + return v1alpha2.MachineStatus{ NodeRef: nodeRef, ErrorReason: errorReason, ErrorMessage: errorMessage, } } -func getMachineWithError(machineName, namespace string, nodeRef *v1.ObjectReference, errorReason *common.MachineStatusError, errorMessage *string) v1alpha1.Machine { - return v1alpha1.Machine{ +func getMachineWithError(machineName, namespace string, nodeRef *v1.ObjectReference, errorReason *capierrors.MachineStatusError, errorMessage *string) v1alpha2.Machine { + return v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: machineName, Namespace: namespace, }, + Spec: v1alpha2.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureRef", + Name: "machine-infrastructure", + }, + }, Status: newMachineStatus(nodeRef, errorReason, errorMessage), } } @@ -75,7 +83,7 @@ func getNodeWithReadyStatus(nodeName string, nodeReadyStatus v1.ConditionStatus) func TestGetClusterObjectWithNoCluster(t *testing.T) { // Setup the Manager and Controller. - mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } @@ -89,7 +97,7 @@ func TestGetClusterObjectWithNoCluster(t *testing.T) { func TestGetClusterObjectWithOneCluster(t *testing.T) { // Setup the Manager and Controller. - mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } @@ -98,7 +106,7 @@ func TestGetClusterObjectWithOneCluster(t *testing.T) { const testClusterName = "test-cluster" const testNamespace = "get-cluster-object-with-one-cluster" - cluster := testutil.GetVanillaCluster() + cluster := v1alpha2.Cluster{} cluster.Name = testClusterName cluster.Namespace = testNamespace if err := c.Create(context.TODO(), &cluster); err != nil { @@ -155,7 +163,7 @@ func TestGetClusterObjectWithOneCluster(t *testing.T) { func TestGetClusterObjectWithMoreThanOneCluster(t *testing.T) { // Setup the Manager and Controller. 
- mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } @@ -165,7 +173,7 @@ func TestGetClusterObjectWithMoreThanOneCluster(t *testing.T) { const testNamespace = "get-cluster-object-with-more-than-one-cluster" const testClusterName1 = "test-cluster1" - cluster1 := testutil.GetVanillaCluster() + cluster1 := v1alpha2.Cluster{} cluster1.Name = testClusterName1 cluster1.Namespace = testNamespace if err := c.Create(context.TODO(), &cluster1); err != nil { @@ -174,7 +182,7 @@ func TestGetClusterObjectWithMoreThanOneCluster(t *testing.T) { defer c.Delete(context.TODO(), &cluster1) const testClusterName2 = "test-cluster2" - cluster2 := testutil.GetVanillaCluster() + cluster2 := v1alpha2.Cluster{} cluster2.Name = testClusterName2 cluster2.Namespace = testNamespace if err := c.Create(context.TODO(), &cluster2); err != nil { @@ -217,38 +225,38 @@ func TestGetClusterObjectWithMoreThanOneCluster(t *testing.T) { func TestValidateClusterObject(t *testing.T) { var testcases = []struct { name string - errorReason common.ClusterStatusError - errorMessage string + errorReason *capierrors.ClusterStatusError + errorMessage *string expectErr bool }{ { name: "Cluster has no error", - errorReason: "", - errorMessage: "", + errorReason: nil, + errorMessage: nil, expectErr: false, }, { name: "Cluster has error reason", - errorReason: common.CreateClusterError, - errorMessage: "", + errorReason: capierrors.ClusterStatusErrorPtr(capierrors.CreateClusterError), + errorMessage: nil, expectErr: true, }, { name: "Cluster has error message", - errorReason: "", - errorMessage: "Failed to create cluster", + errorReason: nil, + errorMessage: pointer.StringPtr("Failed to create cluster"), expectErr: true, }, { name: "Cluster has error reason and message", - errorReason: common.CreateClusterError, - errorMessage: "Failed to create cluster", + errorReason: capierrors.ClusterStatusErrorPtr(capierrors.CreateClusterError), + errorMessage: pointer.StringPtr("Failed to create cluster"), expectErr: true, }, } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - cluster := testutil.GetVanillaCluster() + cluster := v1alpha2.Cluster{} cluster.Name = "test-cluster" cluster.Namespace = "default" cluster.Status = newClusterStatus(testcase.errorReason, testcase.errorMessage) @@ -266,7 +274,7 @@ func TestValidateClusterObject(t *testing.T) { func TestValidateMachineObjects(t *testing.T) { // Setup the Manager and Controller. 
- mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } @@ -281,12 +289,12 @@ func TestValidateMachineObjects(t *testing.T) { defer c.Delete(context.TODO(), &testNode) testNodeRef := v1.ObjectReference{Kind: "Node", Name: testNodeName} - machineErrorReason := common.CreateMachineError + machineErrorReason := capierrors.CreateMachineError machineErrorMessage := "Failed to create machine" var testcases = []struct { name string nodeRef *v1.ObjectReference - errorReason *common.MachineStatusError + errorReason *capierrors.MachineStatusError errorMessage *string expectErr bool }{ @@ -328,8 +336,8 @@ func TestValidateMachineObjects(t *testing.T) { } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - machines := v1alpha1.MachineList{ - Items: []v1alpha1.Machine{ + machines := v1alpha2.MachineList{ + Items: []v1alpha2.Machine{ getMachineWithError("test-machine-with-no-error", "default", &testNodeRef, nil, nil), getMachineWithError("test-machine", "default", testcase.nodeRef, testcase.errorReason, testcase.errorMessage), }, @@ -348,7 +356,7 @@ func TestValidateMachineObjects(t *testing.T) { func TestValidateMachineObjectWithReferredNode(t *testing.T) { // Setup the Manager and Controller. - mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } @@ -394,8 +402,8 @@ func TestValidateMachineObjectWithReferredNode(t *testing.T) { } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - machines := v1alpha1.MachineList{ - Items: []v1alpha1.Machine{ + machines := v1alpha2.MachineList{ + Items: []v1alpha2.Machine{ getMachineWithError("test-machine", "default", &testcase.nodeRef, nil, nil), }, } @@ -413,7 +421,7 @@ func TestValidateMachineObjectWithReferredNode(t *testing.T) { func TestValidateClusterAPIObjectsOutput(t *testing.T) { // Setup the Manager and Controller. 
- mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } @@ -435,31 +443,34 @@ func TestValidateClusterAPIObjectsOutput(t *testing.T) { testNodeRef2 := v1.ObjectReference{Kind: "Node", Name: testNode2Name} testNodeRefNotReady := v1.ObjectReference{Kind: "Node", Name: testNodeNotReadyName} testNodeRefNotExist := v1.ObjectReference{Kind: "Node", Name: "test-node-not-exist"} - machineErrorReason := common.CreateMachineError + machineErrorReason := capierrors.CreateMachineError machineErrorMessage := "Failed to create machine" var testcases = []struct { name string namespace string - clusterStatus v1alpha1.ClusterStatus - machine1Status v1alpha1.MachineStatus - machine2Status v1alpha1.MachineStatus + clusterStatus v1alpha2.ClusterStatus + machine1Status v1alpha2.MachineStatus + machine2Status v1alpha2.MachineStatus expectErr bool outputFileName string }{ { name: "Pass", namespace: "validate-cluster-objects", - clusterStatus: v1alpha1.ClusterStatus{}, + clusterStatus: v1alpha2.ClusterStatus{}, machine1Status: newMachineStatus(&testNodeRef1, nil, nil), machine2Status: newMachineStatus(&testNodeRef2, nil, nil), expectErr: false, outputFileName: "validate-cluster-api-object-output-pass.golden", }, { - name: "Failed to validate cluster object", - namespace: "validate-cluster-objects-errors", - clusterStatus: newClusterStatus(common.CreateClusterError, "Failed to create cluster"), + name: "Failed to validate cluster object", + namespace: "validate-cluster-objects-errors", + clusterStatus: newClusterStatus( + capierrors.ClusterStatusErrorPtr(capierrors.CreateClusterError), + pointer.StringPtr("Failed to create cluster"), + ), machine1Status: newMachineStatus(&testNodeRef1, nil, nil), machine2Status: newMachineStatus(&testNodeRef2, nil, nil), expectErr: true, @@ -468,16 +479,16 @@ func TestValidateClusterAPIObjectsOutput(t *testing.T) { { name: "Failed to validate machine objects with errors", namespace: "validate-machine-objects-errors", - clusterStatus: v1alpha1.ClusterStatus{}, + clusterStatus: v1alpha2.ClusterStatus{}, machine1Status: newMachineStatus(&testNodeRef1, &machineErrorReason, &machineErrorMessage), - machine2Status: v1alpha1.MachineStatus{}, // newMachineStatus(nil, nil, nil), + machine2Status: v1alpha2.MachineStatus{}, // newMachineStatus(nil, nil, nil), expectErr: true, outputFileName: "fail-to-validate-machine-objects-with-errors.golden", }, { name: "Failed to validate machine objects with node ref errors", namespace: "validate-machine-objects-node-ref-errors", - clusterStatus: v1alpha1.ClusterStatus{}, + clusterStatus: v1alpha2.ClusterStatus{}, machine1Status: newMachineStatus(&testNodeRefNotReady, nil, nil), machine2Status: newMachineStatus(&testNodeRefNotExist, nil, nil), expectErr: true, @@ -486,7 +497,7 @@ func TestValidateClusterAPIObjectsOutput(t *testing.T) { } for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - cluster := testutil.GetVanillaCluster() + cluster := v1alpha2.Cluster{} cluster.Name = testClusterName cluster.Namespace = testcase.namespace if err := c.Create(context.TODO(), &cluster); err != nil { diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_pods.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_pods.go index 5bf5ec5f97..98a75e5089 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_pods.go +++ 
b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/validation/validate_pods.go @@ -51,7 +51,7 @@ func ValidatePods(ctx context.Context, w io.Writer, c client.Client, namespace s func getPods(ctx context.Context, c client.Client, namespace string) (*corev1.PodList, error) { pods := &corev1.PodList{} - if err := c.List(ctx, client.InNamespace(namespace), pods); err != nil { + if err := c.List(ctx, pods, client.InNamespace(namespace)); err != nil { return nil, fmt.Errorf("failed to get pods in namespace %q: %v", namespace, err) } return pods, nil @@ -108,7 +108,7 @@ func validatePods(w io.Writer, pods *corev1.PodList, namespace string) error { func getComponents(ctx context.Context, c client.Client) (*corev1.ComponentStatusList, error) { components := &corev1.ComponentStatusList{} - if err := c.List(ctx, &client.ListOptions{}, components); err != nil { + if err := c.List(ctx, components); err != nil { return nil, err } return components, nil diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/BUILD.bazel index 6d7398a9f8..c9001caa8f 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/BUILD.bazel @@ -6,13 +6,9 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/example-provider", visibility = ["//visibility:private"], deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/common:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", + "//api/v1alpha2:go_default_library", "//pkg/controller/cluster:go_default_library", "//pkg/controller/machine:go_default_library", - "//pkg/provider/example/actuators/cluster:go_default_library", - "//pkg/provider/example/actuators/machine:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client/config:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/main.go b/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/main.go index 8131207c4f..9c532f7b94 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/main.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/example-provider/main.go @@ -20,13 +20,9 @@ import ( "flag" "k8s.io/klog" - clusterapis "sigs.k8s.io/cluster-api/pkg/apis" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/cluster-api/api/v1alpha2" capicluster "sigs.k8s.io/cluster-api/pkg/controller/cluster" capimachine "sigs.k8s.io/cluster-api/pkg/controller/machine" - "sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster" - "sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" @@ -34,7 +30,6 @@ import ( func main() { klog.InitFlags(nil) - flag.Set("logtostderr", "true") flag.Parse() cfg := config.GetConfigOrDie() @@ -45,28 +40,12 @@ func main() { klog.Fatalf("Failed to set up controller manager: %v", err) } - cs, err := clientset.NewForConfig(cfg) - if err != nil { - klog.Fatalf("Failed to create client from configuration: %v", err) - } - - recorder := mgr.GetRecorder("clusterapi-controller") - - // Initialize cluster actuator. 
- clusterActuator, _ := cluster.NewClusterActuator(cs.ClusterV1alpha1(), recorder) - - // Initialize machine actuator. - machineActuator, _ := machine.NewMachineActuator(cs.ClusterV1alpha1(), recorder) - - // Register cluster deployer - common.RegisterClusterProvisioner("example", clusterActuator) - - if err := clusterapis.AddToScheme(mgr.GetScheme()); err != nil { + if err := v1alpha2.AddToScheme(mgr.GetScheme()); err != nil { klog.Fatal(err) } - capimachine.AddWithActuator(mgr, machineActuator) - capicluster.AddWithActuator(mgr, clusterActuator) + capimachine.Add(mgr) + capicluster.Add(mgr) if err := mgr.Start(signals.SetupSignalHandler()); err != nil { klog.Fatalf("Failed to run manager: %v", err) diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/manager/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/cmd/manager/BUILD.bazel index ae0ad11ab6..469943842b 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/manager/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/cmd/manager/BUILD.bazel @@ -6,7 +6,7 @@ go_library( importpath = "sigs.k8s.io/cluster-api/cmd/manager", visibility = ["//visibility:private"], deps = [ - "//pkg/apis:go_default_library", + "//api/v1alpha2:go_default_library", "//pkg/controller:go_default_library", "//vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/manager/main.go b/vendor/sigs.k8s.io/cluster-api/cmd/manager/main.go index 6b0e713e91..9f8a482ba4 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/manager/main.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/manager/main.go @@ -23,7 +23,7 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/klog" "k8s.io/klog/klogr" - "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -65,7 +65,7 @@ func main() { klog.Info("Registering Components") // Setup Scheme for all resources. - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { + if err := v1alpha2.AddToScheme(mgr.GetScheme()); err != nil { klog.Fatal(err) } diff --git a/vendor/sigs.k8s.io/cluster-api/config/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/config/BUILD.bazel index 2b62279db3..3cde91bbaf 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/config/BUILD.bazel @@ -1,7 +1,7 @@ filegroup( name = "kustomize-yaml", srcs = glob([ - "crds/*.yaml", + "crd/bases/*.yaml", "rbac/*.yaml", "manager/*.yaml", "default/*.yaml", diff --git a/vendor/sigs.k8s.io/cluster-api/config/ci/rbac/kustomization.yaml b/vendor/sigs.k8s.io/cluster-api/config/ci/rbac/kustomization.yaml index e62b08f20a..e425d80a78 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/ci/rbac/kustomization.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/ci/rbac/kustomization.yaml @@ -4,5 +4,5 @@ # YAML string, with resources separated by document # markers ("---"). 
resources: -- manager_role_binding.yaml -- manager_role.yaml +- role_binding.yaml +- role.yaml diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_clusters.yaml new file mode 100644 index 0000000000..59a5f3ad4b --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -0,0 +1,158 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: clusters.cluster.x-k8s.io +spec: + group: cluster.x-k8s.io + names: + kind: Cluster + plural: clusters + shortNames: + - cl + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: Cluster is the Schema for the clusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [ClusterSpec] ClusterSpec defines the desired state of Cluster + properties: + clusterNetwork: + description: Cluster network configuration + properties: + pods: + description: The network ranges from which Pod networks are allocated. + properties: + cidrBlocks: + items: + type: string + type: array + required: + - cidrBlocks + type: object + serviceDomain: + description: Domain name for services. + type: string + services: + description: The network ranges from which service VIPs are allocated. + properties: + cidrBlocks: + items: + type: string + type: array + required: + - cidrBlocks + type: object + required: + - pods + - serviceDomain + - services + type: object + infrastructureRef: + description: InfrastructureRef is a reference to a provider-specific + resource that holds the details for provisioning infrastructure for + a cluster in said provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + type: object + status: + description: / [ClusterStatus] ClusterStatus defines the observed state + of Cluster + properties: + apiEndpoints: + description: APIEndpoints represents the endpoints to communicate with + the control plane. + items: + description: / [APIEndpoint] APIEndpoint represents a reachable Kubernetes + API endpoint. + properties: + host: + description: The hostname on which the API server is serving. + type: string + port: + description: The port on which the API server is serving. + type: integer + required: + - host + - port + type: object + type: array + errorMessage: + description: ErrorMessage indicates that there is a problem reconciling + the state, and will be set to a descriptive error message. + type: string + errorReason: + description: ErrorReason indicates that there is a problem reconciling + the state, and will be set to a token value suitable for programmatic + interpretation. + type: string + infrastructureReady: + description: InfrastructureReady is the state of the infrastructure + provider. + type: boolean + phase: + description: Phase represents the current phase of cluster actuation. + E.g. Pending, Running, Terminating, Failed etc. + type: string + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml new file mode 100644 index 0000000000..088fbead55 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinedeployments.yaml @@ -0,0 +1,534 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: machinedeployments.cluster.x-k8s.io +spec: + group: cluster.x-k8s.io + names: + kind: MachineDeployment + plural: machinedeployments + shortNames: + - md + scope: Namespaced + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + validation: + openAPIV3Schema: + description: / [MachineDeployment] MachineDeployment is the Schema for the machinedeployments + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [MachineDeploymentSpec] MachineDeploymentSpec defines the + desired state of MachineDeployment + properties: + minReadySeconds: + description: Minimum number of seconds for which a newly created machine + should be ready. Defaults to 0 (machine will be considered available + as soon as it is ready) + format: int32 + type: integer + paused: + description: Indicates that the deployment is paused. + type: boolean + progressDeadlineSeconds: + description: The maximum time in seconds for a deployment to make progress + before it is considered to be failed. The deployment controller will + continue to process failed deployments and a condition with a ProgressDeadlineExceeded + reason will be surfaced in the deployment status. Note that progress + will not be estimated during the time a deployment is paused. Defaults + to 600s. + format: int32 + type: integer + replicas: + description: Number of desired machines. Defaults to 1. This is a pointer + to distinguish between explicit zero and not specified. + format: int32 + type: integer + revisionHistoryLimit: + description: The number of old MachineSets to retain to allow rollback. + This is a pointer to distinguish between explicit zero and not specified. + Defaults to 1. + format: int32 + type: integer + selector: + description: Label selector for machines. Existing MachineSets whose + machines are selected by this will be the ones affected by this deployment. + It must match the machine template's labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + strategy: + description: The deployment strategy to use to replace existing machines + with new ones. + properties: + rollingUpdate: + description: Rolling update config params. Present only if MachineDeploymentStrategyType + = RollingUpdate. + properties: + maxSurge: + anyOf: + - type: string + - type: integer + description: 'The maximum number of machines that can be scheduled + above the desired number of machines. Value can be an absolute + number (ex: 5) or a percentage of desired machines (ex: 10%). + This can not be 0 if MaxUnavailable is 0. 
Absolute number + is calculated from percentage by rounding up. Defaults to + 1. Example: when this is set to 30%, the new MachineSet can + be scaled up immediately when the rolling update starts, such + that the total number of old and new machines do not exceed + 130% of desired machines. Once old machines have been killed, + new MachineSet can be scaled up further, ensuring that total + number of machines running at any time during the update is + at most 130% of desired machines.' + maxUnavailable: + anyOf: + - type: string + - type: integer + description: 'The maximum number of machines that can be unavailable + during the update. Value can be an absolute number (ex: 5) + or a percentage of desired machines (ex: 10%). Absolute number + is calculated from percentage by rounding down. This can not + be 0 if MaxSurge is 0. Defaults to 0. Example: when this is + set to 30%, the old MachineSet can be scaled down to 70% of + desired machines immediately when the rolling update starts. + Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times during + the update is at least 70% of desired machines.' + type: object + type: + description: Type of deployment. Currently the only supported strategy + is "RollingUpdate". Default is RollingUpdate. + type: string + type: object + template: + description: Template describes the machines that will be created. + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store + and retrieve arbitrary metadata. They are not queryable and + should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the + server, to generate a unique name ONLY IF the Name field has + not been provided. If this field is used, the name returned + to the client will be different than the name passed. This + value will also be combined with a unique suffix. The provided + value has the same validation rules as the Name field, and + may be truncated by the length of the suffix required to make + the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return + a 409 - instead, it will either return 201 Created or 500 + with Reason ServerTimeout indicating a unique name could not + be found in the time allotted, and the client should retry + (optionally after the time indicated in the Retry-After header). + \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. More + info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. 
Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must + be unique. An empty namespace is equivalent to the \"default\" + namespace, but \"default\" is the canonical representation. + Not all objects are required to be scoped to a namespace - + the value of this field for those objects will be empty. \n + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL + objects in the list have been deleted, this object will be + garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, + with the controller field set to true. There cannot be more + than one managing controller. + items: + description: OwnerReference contains enough information to + let you identify an owning object. An owning object must + be in the same namespace as the dependent, or be cluster-scoped, + so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the + key-value store until this reference is removed. Defaults + to false. To set this field, a user needs "delete" permission + of the owner, otherwise 422 (Unprocessable Entity) will + be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + spec: + description: 'Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + properties: + bootstrap: + description: Bootstrap is a reference to a local struct which + encapsulates fields to configure the Machine’s bootstrapping + mechanism. + properties: + configRef: + description: ConfigRef is a reference to a bootstrap provider-specific + resource that holds configuration details. The reference + is optional to allow users/operators to specify Bootstrap.Data + without the need of a controller. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a + valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container + that triggered the event) or if no container name + is specified "spec.containers[2]" (container with + index 2 in this pod). 
This syntax is chosen only to + have some well-defined way of referencing a part of + an object. TODO: this design is not final and this + field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this + reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + data: + description: Data contains the bootstrap data, such as cloud-init + details scripts. If nil, the Machine should remain in + the Pending state. + type: string + type: object + infrastructureRef: + description: InfrastructureRef is a required reference to a + custom resource offered by an infrastructure provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. + Use this to indicate what labels, annotations, name prefix, + etc., should be used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. 
+ More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by + the server, to generate a unique name ONLY IF the Name + field has not been provided. If this field is used, the + name returned to the client will be different than the + name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules + as the Name field, and may be truncated by the length + of the suffix required to make the value unique on the + server. \n If this field is specified and the generated + name exists, the server will NOT return a 409 - instead, + it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time + allotted, and the client should retry (optionally after + the time indicated in the Retry-After header). \n Applied + only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is + required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name + must be unique. An empty namespace is equivalent to the + \"default\" namespace, but \"default\" is the canonical + representation. Not all objects are required to be scoped + to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If + ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed by + a controller, then an entry in this list will point to + this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + Defaults to false. To set this field, a user needs + "delete" permission of the owner, otherwise 422 + (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine + provided by the provider. This field must match the provider + ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. + Example use case is cluster autoscaler with cluster-api as + provider. Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not + get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is + required by autoscaler to be able to have a provider view + of the list of machines. Another list of nodes is queried + from the k8s apiserver and then a comparison is done to find + out unregistered machines and are marked for delete. This + field will be set by the actuators and consumed by higher + level entities like autoscaler that will be interfacing with + cluster-api as generic provider. + type: string + version: + description: Version defines the desired Kubernetes version. + This field is meant to be optionally used by bootstrap providers. + type: string + required: + - bootstrap + - infrastructureRef + type: object + type: object + required: + - selector + - template + type: object + status: + description: / [MachineDeploymentStatus] MachineDeploymentStatus defines + the observed state of MachineDeployment + properties: + availableReplicas: + description: Total number of available machines (ready for at least + minReadySeconds) targeted by this deployment. + format: int32 + type: integer + observedGeneration: + description: The generation observed by the deployment controller. + format: int64 + type: integer + readyReplicas: + description: Total number of ready machines targeted by this deployment. + format: int32 + type: integer + replicas: + description: Total number of non-terminated machines targeted by this + deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable machines targeted by this deployment. + This is the total number of machines that are still required for the + deployment to have 100% available capacity. They may either be machines + that are running but not yet available or machines that still have + not been created. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated machines targeted by this + deployment that have the desired template spec. 
+ format: int32 + type: integer + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machines.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machines.yaml new file mode 100644 index 0000000000..97c986ed2d --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -0,0 +1,377 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: machines.cluster.x-k8s.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.providerID + description: Provider ID + name: ProviderID + type: string + - JSONPath: .status.phase + description: Machine status such as Terminating/Pending/Running/Failed etc + name: Phase + type: string + - JSONPath: .status.nodeRef.name + description: Node name associated with this machine + name: NodeName + priority: 1 + type: string + group: cluster.x-k8s.io + names: + kind: Machine + plural: machines + shortNames: + - ma + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: / [Machine] Machine is the Schema for the machines API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [MachineSpec] MachineSpec defines the desired state of Machine + properties: + bootstrap: + description: Bootstrap is a reference to a local struct which encapsulates + fields to configure the Machine’s bootstrapping mechanism. + properties: + configRef: + description: ConfigRef is a reference to a bootstrap provider-specific + resource that holds configuration details. The reference is optional + to allow users/operators to specify Bootstrap.Data without the + need of a controller. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + data: + description: Data contains the bootstrap data, such as cloud-init + details scripts. If nil, the Machine should remain in the Pending + state. + type: string + type: object + infrastructureRef: + description: InfrastructureRef is a required reference to a custom resource + offered by an infrastructure provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. Use this + to indicate what labels, annotations, name prefix, etc., should be + used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been + provided. 
If this field is used, the name returned to the client + will be different than the name passed. This value will also be + combined with a unique suffix. The provided value has the same + validation rules as the Name field, and may be truncated by the + length of the suffix required to make the value unique on the + server. \n If this field is specified and the generated name exists, + the server will NOT return a 409 - instead, it will either return + 201 Created or 500 with Reason ServerTimeout indicating a unique + name could not be found in the time allotted, and the client should + retry (optionally after the time indicated in the Retry-After + header). \n Applied only if Name is not specified. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow a client + to request the generation of an appropriate name automatically. + Name is primarily intended for creation idempotence and configuration + definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must + be unique. An empty namespace is equivalent to the \"default\" + namespace, but \"default\" is the canonical representation. Not + all objects are required to be scoped to a namespace - the value + of this field for those objects will be empty. \n Must be a DNS_LABEL. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this + list will point to this controller, with the controller field + set to true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let + you identify an owning object. An owning object must be in the + same namespace as the dependent, or be cluster-scoped, so there + is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. + To set this field, a user needs "delete" permission of the + owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine provided + by the provider. This field must match the provider ID as seen on + the node object corresponding to this machine. This field is required + by higher level consumers of cluster-api. Example use case is cluster + autoscaler with cluster-api as provider. Clean-up logic in the autoscaler + compares machines to nodes to find out machines at provider which + could not get registered as Kubernetes nodes. With cluster-api as + a generic out-of-tree provider for autoscaler, this field is required + by autoscaler to be able to have a provider view of the list of machines. + Another list of nodes is queried from the k8s apiserver and then a + comparison is done to find out unregistered machines and are marked + for delete. This field will be set by the actuators and consumed by + higher level entities like autoscaler that will be interfacing with + cluster-api as generic provider. + type: string + version: + description: Version defines the desired Kubernetes version. This field + is meant to be optionally used by bootstrap providers. + type: string + required: + - bootstrap + - infrastructureRef + type: object + status: + description: / [MachineStatus] MachineStatus defines the observed state + of Machine + properties: + addresses: + description: Addresses is a list of addresses assigned to the machine. + This field is copied from the infrastructure provider reference. + items: + description: MachineAddress contains information for the node's address. + properties: + address: + description: The machine address. + type: string + type: + description: Machine address type, one of Hostname, ExternalIP + or InternalIP. + type: string + required: + - address + - type + type: object + type: array + bootstrapReady: + description: BootstrapReady is the state of the bootstrap provider. + type: boolean + errorMessage: + description: "ErrorMessage will be set in the event that there is a + terminal problem reconciling the Machine and will contain a more verbose + string suitable for logging and human consumption. \n This field should + not be set for transitive errors that a controller faces that are + expected to be fixed automatically over time (like service outages), + but instead indicate that something is fundamentally wrong with the + Machine's spec or the configuration of the controller, and that manual + intervention is required. Examples of terminal errors would be invalid + combinations of settings in the spec, values that are unsupported + by the controller, or the responsible controller itself being critically + misconfigured. \n Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or logged + in the controller's output." + type: string + errorReason: + description: "ErrorReason will be set in the event that there is a terminal + problem reconciling the Machine and will contain a succinct value + suitable for machine interpretation. \n This field should not be set + for transitive errors that a controller faces that are expected to + be fixed automatically over time (like service outages), but instead + indicate that something is fundamentally wrong with the Machine's + spec or the configuration of the controller, and that manual intervention + is required. 
Examples of terminal errors would be invalid combinations + of settings in the spec, values that are unsupported by the controller, + or the responsible controller itself being critically misconfigured. + \n Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output." + type: string + infrastructureReady: + description: InfrastructureReady is the state of the infrastructure + provider. + type: boolean + lastUpdated: + description: LastUpdated identifies when this status was last observed. + format: date-time + type: string + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + phase: + description: Phase represents the current phase of machine actuation. + E.g. Pending, Running, Terminating, Failed etc. + type: string + version: + description: Version specifies the current version of Kubernetes running + on the corresponding Node. This is meant to be a means of bubbling + up status from the Node to the Machine. It is entirely optional, but + useful for end-user UX if it’s present. 
+ type: string + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinesets.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinesets.yaml new file mode 100644 index 0000000000..936fc1b435 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/bases/cluster.x-k8s.io_machinesets.yaml @@ -0,0 +1,496 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: machinesets.cluster.x-k8s.io +spec: + group: cluster.x-k8s.io + names: + kind: MachineSet + plural: machinesets + shortNames: + - ms + scope: Namespaced + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + validation: + openAPIV3Schema: + description: / [MachineSet] MachineSet is the Schema for the machinesets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [MachineSetSpec] MachineSetSpec defines the desired state + of MachineSet + properties: + deletePolicy: + description: DeletePolicy defines the policy used to identify nodes + to delete when downscaling. Defaults to "Random". Valid values are + "Random, "Newest", "Oldest" + enum: + - Random + - Newest + - Oldest + type: string + minReadySeconds: + description: MinReadySeconds is the minimum number of seconds for which + a newly created machine should be ready. Defaults to 0 (machine will + be considered available as soon as it is ready) + format: int32 + type: integer + replicas: + description: Replicas is the number of desired replicas. This is a pointer + to distinguish between explicit zero and unspecified. Defaults to + 1. + format: int32 + type: integer + selector: + description: 'Selector is a label query over machines that should match + the replica count. Label keys and values that must match in order + to be controlled by this MachineSet. It must match the machine template''s + labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + template: + description: Template is the object that describes the machine that + will be created if insufficient replicas are detected. Object references + to custom resources resources are treated as templates. + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store + and retrieve arbitrary metadata. They are not queryable and + should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the + server, to generate a unique name ONLY IF the Name field has + not been provided. If this field is used, the name returned + to the client will be different than the name passed. This + value will also be combined with a unique suffix. The provided + value has the same validation rules as the Name field, and + may be truncated by the length of the suffix required to make + the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return + a 409 - instead, it will either return 201 Created or 500 + with Reason ServerTimeout indicating a unique name could not + be found in the time allotted, and the client should retry + (optionally after the time indicated in the Retry-After header). + \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. More + info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must + be unique. An empty namespace is equivalent to the \"default\" + namespace, but \"default\" is the canonical representation. + Not all objects are required to be scoped to a namespace - + the value of this field for those objects will be empty. \n + Must be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL + objects in the list have been deleted, this object will be + garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, + with the controller field set to true. There cannot be more + than one managing controller. + items: + description: OwnerReference contains enough information to + let you identify an owning object. An owning object must + be in the same namespace as the dependent, or be cluster-scoped, + so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the + key-value store until this reference is removed. Defaults + to false. To set this field, a user needs "delete" permission + of the owner, otherwise 422 (Unprocessable Entity) will + be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + spec: + description: 'Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + properties: + bootstrap: + description: Bootstrap is a reference to a local struct which + encapsulates fields to configure the Machine’s bootstrapping + mechanism. + properties: + configRef: + description: ConfigRef is a reference to a bootstrap provider-specific + resource that holds configuration details. The reference + is optional to allow users/operators to specify Bootstrap.Data + without the need of a controller. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a + valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container + that triggered the event) or if no container name + is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to + have some well-defined way of referencing a part of + an object. TODO: this design is not final and this + field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this + reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + data: + description: Data contains the bootstrap data, such as cloud-init + details scripts. If nil, the Machine should remain in + the Pending state. + type: string + type: object + infrastructureRef: + description: InfrastructureRef is a required reference to a + custom resource offered by an infrastructure provider. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. + Use this to indicate what labels, annotations, name prefix, + etc., should be used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by + the server, to generate a unique name ONLY IF the Name + field has not been provided. If this field is used, the + name returned to the client will be different than the + name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules + as the Name field, and may be truncated by the length + of the suffix required to make the value unique on the + server. 
\n If this field is specified and the generated + name exists, the server will NOT return a 409 - instead, + it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time + allotted, and the client should retry (optionally after + the time indicated in the Retry-After header). \n Applied + only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is + required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name + must be unique. An empty namespace is equivalent to the + \"default\" namespace, but \"default\" is the canonical + representation. Not all objects are required to be scoped + to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If + ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed by + a controller, then an entry in this list will point to + this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + Defaults to false. To set this field, a user needs + "delete" permission of the owner, otherwise 422 + (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine + provided by the provider. This field must match the provider + ID as seen on the node object corresponding to this machine. 
+ This field is required by higher level consumers of cluster-api. + Example use case is cluster autoscaler with cluster-api as + provider. Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not + get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is + required by autoscaler to be able to have a provider view + of the list of machines. Another list of nodes is queried + from the k8s apiserver and then a comparison is done to find + out unregistered machines and are marked for delete. This + field will be set by the actuators and consumed by higher + level entities like autoscaler that will be interfacing with + cluster-api as generic provider. + type: string + version: + description: Version defines the desired Kubernetes version. + This field is meant to be optionally used by bootstrap providers. + type: string + required: + - bootstrap + - infrastructureRef + type: object + type: object + required: + - selector + type: object + status: + description: / [MachineSetStatus] MachineSetStatus defines the observed + state of MachineSet + properties: + availableReplicas: + description: The number of available replicas (ready for at least minReadySeconds) + for this MachineSet. + format: int32 + type: integer + errorMessage: + type: string + errorReason: + description: "In the event that there is a terminal problem reconciling + the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason + will be populated with a succinct value suitable for machine interpretation, + while ErrorMessage will contain a more verbose string suitable for + logging and human consumption. \n These fields should not be set for + transitive errors that a controller faces that are expected to be + fixed automatically over time (like service outages), but instead + indicate that something is fundamentally wrong with the MachineTemplate's + spec or the configuration of the machine controller, and that manual + intervention is required. Examples of terminal errors would be invalid + combinations of settings in the spec, values that are unsupported + by the machine controller, or the responsible machine controller itself + being critically misconfigured. \n Any transient errors that occur + during the reconciliation of Machines can be added as events to the + MachineSet object and/or logged in the controller's output." + type: string + fullyLabeledReplicas: + description: The number of replicas that have labels matching the labels + of the machine template of the MachineSet. + format: int32 + type: integer + observedGeneration: + description: ObservedGeneration reflects the generation of the most + recently observed MachineSet. + format: int64 + type: integer + readyReplicas: + description: The number of ready replicas for this MachineSet. A machine + is considered ready when the node has been created and is "Ready". + format: int32 + type: integer + replicas: + description: Replicas is the most recently observed number of replicas. 
+ format: int32 + type: integer + required: + - replicas + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/kustomization.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/kustomization.yaml new file mode 100644 index 0000000000..63ad5fe921 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/kustomization.yaml @@ -0,0 +1,30 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/cluster.x-k8s.io_clusters.yaml +- bases/cluster.x-k8s.io_machines.yaml +- bases/cluster.x-k8s.io_machinesets.yaml +- bases/cluster.x-k8s.io_machinedeployments.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_clusters.yaml +#- patches/webhook_in_machines.yaml +#- patches/webhook_in_machinesets.yaml +#- patches/webhook_in_machinedeployments.yaml +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_clusters.yaml +#- patches/cainjection_in_machines.yaml +#- patches/cainjection_in_machinesets.yaml +#- patches/cainjection_in_machinedeployments.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/kustomizeconfig.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000000..6f83d9a94b --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/kustomizeconfig.yaml @@ -0,0 +1,17 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + group: apiextensions.k8s.io + path: spec/conversion/webhookClientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_clusters.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_clusters.yaml new file mode 100644 index 0000000000..81bb1e1bfc --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_clusters.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
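For orientation, the kustomizeconfig.yaml above only takes effect once the [WEBHOOK]/[CERTMANAGER] patches in the kustomization are uncommented: it tells kustomize that the service name and namespace buried inside each CRD's conversion stanza should track the webhook Service, and the varReference on metadata/annotations is what lets config/default resolve the $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) vars in the cert-manager CA-injection patch that follows. A minimal before/after sketch of the substitution, assuming a hypothetical "capi-" name prefix and "capi-system" namespace applied by config/default (neither value is spelled out in this hunk):

# before kustomize build (as written in the webhook_in_*.yaml patches further down)
service:
  namespace: system
  name: webhook-service
  path: /convert

# after kustomize build, rewritten via the nameReference and namespace fieldSpecs above (illustrative values)
service:
  namespace: capi-system
  name: capi-webhook-service
  path: /convert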
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: clusters.cluster.x-k8s.io diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinedeployments.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinedeployments.yaml new file mode 100644 index 0000000000..befb940f34 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinedeployments.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: machinedeployments.cluster.x-k8s.io diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machines.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machines.yaml new file mode 100644 index 0000000000..53d73e08ab --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machines.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: machines.cluster.x-k8s.io diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinesets.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinesets.yaml new file mode 100644 index 0000000000..db0ab29253 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/cainjection_in_machinesets.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: machinesets.cluster.x-k8s.io diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_clusters.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_clusters.yaml new file mode 100644 index 0000000000..6af79a20e2 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_clusters.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusters.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinedeployments.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinedeployments.yaml new file mode 100644 index 0000000000..1b47439866 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinedeployments.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: machinedeployments.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machines.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machines.yaml new file mode 100644 index 0000000000..66dd96bd73 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machines.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: machines.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinesets.yaml b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinesets.yaml new file mode 100644 index 0000000000..9748ec4dce --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crd/patches/webhook_in_machinesets.yaml @@ -0,0 +1,17 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. 
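The same conversion stanza is repeated for each CRD (the machinesets patch that follows is identical in shape), and two details are easy to miss: caBundle: Cg== is just a base64-encoded newline, kept only so the apiserver does not reject an empty field, and it is the CA-injection patches above that later replace it with a real CA via the certmanager.k8s.io/inject-ca-from annotation. A rough sketch of a CRD after that injection has run; the bundle value and namespace here are made up for illustration:

spec:
  conversion:
    strategy: Webhook
    webhookClientConfig:
      caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t...   # real CA injected by cert-manager (truncated, illustrative)
      service:
        namespace: capi-system    # hypothetical value after kustomize namespace substitution
        name: webhook-service
        path: /convert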
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: machinesets.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhookClientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_cluster.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_clusters.yaml similarity index 63% rename from vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_cluster.yaml rename to vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_clusters.yaml index 5a346c63a6..c7865bb5f1 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_cluster.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_clusters.yaml @@ -1,9 +1,9 @@ + +--- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" name: clusters.cluster.k8s.io spec: group: cluster.k8s.io @@ -13,10 +13,9 @@ spec: shortNames: - cl scope: Namespaced - subresources: - status: {} validation: openAPIV3Schema: + description: / [Cluster] Cluster is the Schema for the clusters API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -31,6 +30,7 @@ spec: metadata: type: object spec: + description: / [ClusterSpec] ClusterSpec defines the desired state of Cluster properties: clusterNetwork: description: Cluster network configuration @@ -59,9 +59,9 @@ spec: - cidrBlocks type: object required: - - services - pods - serviceDomain + - services type: object providerSpec: description: Provider-specific serialized configuration to use during @@ -83,29 +83,63 @@ spec: description: The machine class from which the provider config should be sourced. properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string provider: description: Provider is the name of the cloud-provider which MachineClass is intended for. type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string type: object type: object type: object - required: - - clusterNetwork type: object status: + description: / [ClusterStatus] ClusterStatus defines the observed state + of Cluster properties: apiEndpoints: description: APIEndpoint represents the endpoint to communicate with the IP. items: + description: / [APIEndpoint] APIEndpoint represents a reachable Kubernetes + API endpoint. properties: host: description: The hostname on which the API server is serving. type: string port: description: The port on which the API server is serving. - format: int64 type: integer required: - host @@ -127,7 +161,12 @@ spec: from this field. type: object type: object + type: object version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineclass.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machineclasses.yaml similarity index 83% rename from vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineclass.yaml rename to vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machineclasses.yaml index 33a9281d4b..a7632e2b87 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineclass.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machineclasses.yaml @@ -1,9 +1,9 @@ + +--- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" name: machineclasses.cluster.k8s.io spec: group: cluster.k8s.io @@ -15,6 +15,8 @@ spec: scope: Namespaced validation: openAPIV3Schema: + description: / [MachineClass] MachineClass can be used to templatize and re-use + provider configuration across multiple Machines / MachineSets / MachineDeployments. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -33,7 +35,12 @@ spec: type: object required: - providerSpec + type: object version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinedeployments.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinedeployments.yaml new file mode 100644 index 0000000000..7563fb57c0 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinedeployments.yaml @@ -0,0 +1,595 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: machinedeployments.cluster.k8s.io +spec: + group: cluster.k8s.io + names: + kind: MachineDeployment + plural: machinedeployments + shortNames: + - md + scope: Namespaced + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + validation: + openAPIV3Schema: + description: / [MachineDeployment] MachineDeployment is the Schema for the machinedeployments + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [MachineDeploymentSpec] MachineDeploymentSpec defines the + desired state of MachineDeployment + properties: + minReadySeconds: + description: Minimum number of seconds for which a newly created machine + should be ready. Defaults to 0 (machine will be considered available + as soon as it is ready) + format: int32 + type: integer + paused: + description: Indicates that the deployment is paused. + type: boolean + progressDeadlineSeconds: + description: The maximum time in seconds for a deployment to make progress + before it is considered to be failed. The deployment controller will + continue to process failed deployments and a condition with a ProgressDeadlineExceeded + reason will be surfaced in the deployment status. Note that progress + will not be estimated during the time a deployment is paused. Defaults + to 600s. + format: int32 + type: integer + replicas: + description: Number of desired machines. Defaults to 1. This is a pointer + to distinguish between explicit zero and not specified. + format: int32 + type: integer + revisionHistoryLimit: + description: The number of old MachineSets to retain to allow rollback. + This is a pointer to distinguish between explicit zero and not specified. + Defaults to 1. + format: int32 + type: integer + selector: + description: Label selector for machines. Existing MachineSets whose + machines are selected by this will be the ones affected by this deployment. + It must match the machine template's labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + strategy: + description: The deployment strategy to use to replace existing machines + with new ones. + properties: + rollingUpdate: + description: Rolling update config params. 
Present only if MachineDeploymentStrategyType + = RollingUpdate. + properties: + maxSurge: + anyOf: + - type: string + - type: integer + description: 'The maximum number of machines that can be scheduled + above the desired number of machines. Value can be an absolute + number (ex: 5) or a percentage of desired machines (ex: 10%). + This can not be 0 if MaxUnavailable is 0. Absolute number + is calculated from percentage by rounding up. Defaults to + 1. Example: when this is set to 30%, the new MachineSet can + be scaled up immediately when the rolling update starts, such + that the total number of old and new machines do not exceed + 130% of desired machines. Once old machines have been killed, + new MachineSet can be scaled up further, ensuring that total + number of machines running at any time during the update is + at most 130% of desired machines.' + maxUnavailable: + anyOf: + - type: string + - type: integer + description: 'The maximum number of machines that can be unavailable + during the update. Value can be an absolute number (ex: 5) + or a percentage of desired machines (ex: 10%). Absolute number + is calculated from percentage by rounding down. This can not + be 0 if MaxSurge is 0. Defaults to 0. Example: when this is + set to 30%, the old MachineSet can be scaled down to 70% of + desired machines immediately when the rolling update starts. + Once new machines are ready, old MachineSet can be scaled + down further, followed by scaling up the new MachineSet, ensuring + that the total number of machines available at all times during + the update is at least 70% of desired machines.' + type: object + type: + description: Type of deployment. Currently the only supported strategy + is "RollingUpdate". Default is RollingUpdate. + type: string + type: object + template: + description: Template describes the machines that will be created. + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store + and retrieve arbitrary metadata. They are not queryable and + should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the + server, to generate a unique name ONLY IF the Name field has + not been provided. If this field is used, the name returned + to the client will be different than the name passed. This + value will also be combined with a unique suffix. The provided + value has the same validation rules as the Name field, and + may be truncated by the length of the suffix required to make + the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return + a 409 - instead, it will either return 201 Created or 500 + with Reason ServerTimeout indicating a unique name could not + be found in the time allotted, and the client should retry + (optionally after the time indicated in the Retry-After header). + \n Applied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. More + info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must + be unique. An empty namespace is equivalent to the \"default\" + namespace, but \"default\" is the canonical representation. + Not all objects are required to be scoped to a namespace - + the value of this field for those objects will be empty. \n + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL + objects in the list have been deleted, this object will be + garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, + with the controller field set to true. There cannot be more + than one managing controller. + items: + description: OwnerReference contains enough information to + let you identify an owning object. An owning object must + be in the same namespace as the dependent, or be cluster-scoped, + so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the + key-value store until this reference is removed. Defaults + to false. To set this field, a user needs "delete" permission + of the owner, otherwise 422 (Unprocessable Entity) will + be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + spec: + description: 'Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + properties: + configSource: + description: ConfigSource is used to populate in the associated + Node for dynamic kubelet config. This field already exists + in Node, so any updates to it in the Machine spec will be + automatically copied to the linked NodeRef from the status. + The rest of dynamic kubelet config support should then work + as-is. 
+ properties: + configMap: + description: ConfigMap is a reference to a Node's ConfigMap + properties: + kubeletConfigKey: + description: KubeletConfigKey declares which key of + the referenced ConfigMap corresponds to the KubeletConfiguration + structure This field is required in all cases. + type: string + name: + description: Name is the metadata.name of the referenced + ConfigMap. This field is required in all cases. + type: string + namespace: + description: Namespace is the metadata.namespace of + the referenced ConfigMap. This field is required in + all cases. + type: string + resourceVersion: + description: ResourceVersion is the metadata.ResourceVersion + of the referenced ConfigMap. This field is forbidden + in Node.Spec, and required in Node.Status. + type: string + uid: + description: UID is the metadata.UID of the referenced + ConfigMap. This field is forbidden in Node.Spec, and + required in Node.Status. + type: string + required: + - kubeletConfigKey + - name + - namespace + type: object + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. + Use this to indicate what labels, annotations, name prefix, + etc., should be used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by + the server, to generate a unique name ONLY IF the Name + field has not been provided. If this field is used, the + name returned to the client will be different than the + name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules + as the Name field, and may be truncated by the length + of the suffix required to make the value unique on the + server. \n If this field is specified and the generated + name exists, the server will NOT return a 409 - instead, + it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time + allotted, and the client should retry (optionally after + the time indicated in the Retry-After header). \n Applied + only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is + required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name + must be unique. An empty namespace is equivalent to the + \"default\" namespace, but \"default\" is the canonical + representation. 
Not all objects are required to be scoped + to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If + ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed by + a controller, then an entry in this list will point to + this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + Defaults to false. To set this field, a user needs + "delete" permission of the owner, otherwise 422 + (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine + provided by the provider. This field must match the provider + ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. + Example use case is cluster autoscaler with cluster-api as + provider. Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not + get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is + required by autoscaler to be able to have a provider view + of the list of machines. Another list of nodes is queried + from the k8s apiserver and then a comparison is done to find + out unregistered machines and are marked for delete. This + field will be set by the actuators and consumed by higher + level entities like autoscaler that will be interfacing with + cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration + to use during node creation. + properties: + value: + description: Value is an inlined, serialized representation + of the resource configuration. It is recommended that + providers maintain their own versioned API types that + should be serialized/deserialized from this field, akin + to component config. + type: object + valueFrom: + description: Source for the provider configuration. Cannot + be used if value is not empty. + properties: + machineClass: + description: The machine class from which the provider + config should be sourced. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object + instead of an entire object, this string should + contain a valid JSON/Go field access statement, + such as desiredState.manifest.containers[2]. For + example, if the object reference is to a container + within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to + the name of the container that triggered the event) + or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax + is chosen only to have some well-defined way of + referencing a part of an object. TODO: this design + is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + provider: + description: Provider is the name of the cloud-provider + which MachineClass is intended for. + type: string + resourceVersion: + description: 'Specific resourceVersion to which + this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + type: object + type: object + taints: + description: The list of the taints to be applied to the corresponding + Node in additive manner. This list will not overwrite any + other taints added to the Node on an ongoing basis by other + entities. These taints should be actively reconciled e.g. + if you ask the machine controller to apply a taint and then + manually remove the taint the machine controller will put + it back) but not have the machine controller remove any taints + items: + description: The node this Taint is attached to has the "effect" + on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods + that do not tolerate the taint. Valid effects are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to + a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the + taint was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: Required. The taint value corresponding to + the taint key. + type: string + required: + - effect + - key + type: object + type: array + versions: + description: Versions of key software to use. This field is + optional at cluster creation time, and omitting the field + indicates that the cluster installation tool should select + defaults for the user. These defaults may differ based on + the cluster installer, but the tool should populate the values + it uses when persisting Machine objects. A Machine spec missing + this field at runtime is invalid. + properties: + controlPlane: + description: ControlPlane is the semantic version of the + Kubernetes control plane to run. 
This should only be populated + when the machine is a control plane. + type: string + kubelet: + description: Kubelet is the semantic version of kubelet + to run + type: string + required: + - kubelet + type: object + type: object + type: object + required: + - selector + - template + type: object + status: + description: / [MachineDeploymentStatus] MachineDeploymentStatus defines + the observed state of MachineDeployment + properties: + availableReplicas: + description: Total number of available machines (ready for at least + minReadySeconds) targeted by this deployment. + format: int32 + type: integer + observedGeneration: + description: The generation observed by the deployment controller. + format: int64 + type: integer + readyReplicas: + description: Total number of ready machines targeted by this deployment. + format: int32 + type: integer + replicas: + description: Total number of non-terminated machines targeted by this + deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable machines targeted by this deployment. + This is the total number of machines that are still required for the + deployment to have 100% available capacity. They may either be machines + that are running but not yet available or machines that still have + not been created. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated machines targeted by this + deployment that have the desired template spec. + format: int32 + type: integer + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machines.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machines.yaml new file mode 100644 index 0000000000..cb97721594 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machines.yaml @@ -0,0 +1,493 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: machines.cluster.k8s.io +spec: + group: cluster.k8s.io + names: + kind: Machine + plural: machines + shortNames: + - ma + scope: Namespaced + validation: + openAPIV3Schema: + description: / [Machine] Machine is the Schema for the machines API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [MachineSpec] MachineSpec defines the desired state of Machine + properties: + configSource: + description: ConfigSource is used to populate in the associated Node + for dynamic kubelet config. This field already exists in Node, so + any updates to it in the Machine spec will be automatically copied + to the linked NodeRef from the status. 
The rest of dynamic kubelet + config support should then work as-is. + properties: + configMap: + description: ConfigMap is a reference to a Node's ConfigMap + properties: + kubeletConfigKey: + description: KubeletConfigKey declares which key of the referenced + ConfigMap corresponds to the KubeletConfiguration structure + This field is required in all cases. + type: string + name: + description: Name is the metadata.name of the referenced ConfigMap. + This field is required in all cases. + type: string + namespace: + description: Namespace is the metadata.namespace of the referenced + ConfigMap. This field is required in all cases. + type: string + resourceVersion: + description: ResourceVersion is the metadata.ResourceVersion + of the referenced ConfigMap. This field is forbidden in Node.Spec, + and required in Node.Status. + type: string + uid: + description: UID is the metadata.UID of the referenced ConfigMap. + This field is forbidden in Node.Spec, and required in Node.Status. + type: string + required: + - kubeletConfigKey + - name + - namespace + type: object + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. Use this + to indicate what labels, annotations, name prefix, etc., should be + used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the server, + to generate a unique name ONLY IF the Name field has not been + provided. If this field is used, the name returned to the client + will be different than the name passed. This value will also be + combined with a unique suffix. The provided value has the same + validation rules as the Name field, and may be truncated by the + length of the suffix required to make the value unique on the + server. \n If this field is specified and the generated name exists, + the server will NOT return a 409 - instead, it will either return + 201 Created or 500 with Reason ServerTimeout indicating a unique + name could not be found in the time allotted, and the client should + retry (optionally after the time indicated in the Retry-After + header). \n Applied only if Name is not specified. More info: + https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow a client + to request the generation of an appropriate name automatically. + Name is primarily intended for creation idempotence and configuration + definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must + be unique. An empty namespace is equivalent to the \"default\" + namespace, but \"default\" is the canonical representation. 
Not + all objects are required to be scoped to a namespace - the value + of this field for those objects will be empty. \n Must be a DNS_LABEL. + Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects + in the list have been deleted, this object will be garbage collected. + If this object is managed by a controller, then an entry in this + list will point to this controller, with the controller field + set to true. There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information to let + you identify an owning object. An owning object must be in the + same namespace as the dependent, or be cluster-scoped, so there + is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the key-value + store until this reference is removed. Defaults to false. + To set this field, a user needs "delete" permission of the + owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine provided + by the provider. This field must match the provider ID as seen on + the node object corresponding to this machine. This field is required + by higher level consumers of cluster-api. Example use case is cluster + autoscaler with cluster-api as provider. Clean-up logic in the autoscaler + compares machines to nodes to find out machines at provider which + could not get registered as Kubernetes nodes. With cluster-api as + a generic out-of-tree provider for autoscaler, this field is required + by autoscaler to be able to have a provider view of the list of machines. + Another list of nodes is queried from the k8s apiserver and then a + comparison is done to find out unregistered machines and are marked + for delete. This field will be set by the actuators and consumed by + higher level entities like autoscaler that will be interfacing with + cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to + use during node creation. + properties: + value: + description: Value is an inlined, serialized representation of the + resource configuration. It is recommended that providers maintain + their own versioned API types that should be serialized/deserialized + from this field, akin to component config. + type: object + valueFrom: + description: Source for the provider configuration. Cannot be used + if value is not empty. + properties: + machineClass: + description: The machine class from which the provider config + should be sourced. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + provider: + description: Provider is the name of the cloud-provider + which MachineClass is intended for. + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + type: object + type: object + taints: + description: The list of the taints to be applied to the corresponding + Node in additive manner. This list will not overwrite any other taints + added to the Node on an ongoing basis by other entities. These taints + should be actively reconciled e.g. if you ask the machine controller + to apply a taint and then manually remove the taint the machine controller + will put it back) but not have the machine controller remove any taints + items: + description: The node this Taint is attached to has the "effect" on + any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods that do + not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule + and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint + was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: Required. The taint value corresponding to the taint + key. + type: string + required: + - effect + - key + type: object + type: array + versions: + description: Versions of key software to use. This field is optional + at cluster creation time, and omitting the field indicates that the + cluster installation tool should select defaults for the user. These + defaults may differ based on the cluster installer, but the tool should + populate the values it uses when persisting Machine objects. A Machine + spec missing this field at runtime is invalid. + properties: + controlPlane: + description: ControlPlane is the semantic version of the Kubernetes + control plane to run. 
This should only be populated when the machine + is a control plane. + type: string + kubelet: + description: Kubelet is the semantic version of kubelet to run + type: string + required: + - kubelet + type: object + type: object + status: + description: / [MachineStatus] MachineStatus defines the observed state + of Machine + properties: + addresses: + description: Addresses is a list of addresses assigned to the machine. + Queried from cloud provider, if available. + items: + description: NodeAddress contains information for the node's address. + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP or + InternalIP. + type: string + required: + - address + - type + type: object + type: array + conditions: + description: 'Conditions lists the conditions synced from the node conditions + of the corresponding node-object. Machine-controller is responsible + for keeping conditions up-to-date. MachineSet controller will be taking + these conditions as a signal to decide if machine is healthy or needs + to be replaced. Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition' + items: + description: NodeCondition contains condition information for a node. + properties: + lastHeartbeatTime: + description: Last time we got an update on a given condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transit from one status to + another. + format: date-time + type: string + message: + description: Human readable message indicating details about last + transition. + type: string + reason: + description: (brief) reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of node condition. + type: string + required: + - status + - type + type: object + type: array + errorMessage: + description: "ErrorMessage will be set in the event that there is a + terminal problem reconciling the Machine and will contain a more verbose + string suitable for logging and human consumption. \n This field should + not be set for transitive errors that a controller faces that are + expected to be fixed automatically over time (like service outages), + but instead indicate that something is fundamentally wrong with the + Machine's spec or the configuration of the controller, and that manual + intervention is required. Examples of terminal errors would be invalid + combinations of settings in the spec, values that are unsupported + by the controller, or the responsible controller itself being critically + misconfigured. \n Any transient errors that occur during the reconciliation + of Machines can be added as events to the Machine object and/or logged + in the controller's output." + type: string + errorReason: + description: "ErrorReason will be set in the event that there is a terminal + problem reconciling the Machine and will contain a succinct value + suitable for machine interpretation. \n This field should not be set + for transitive errors that a controller faces that are expected to + be fixed automatically over time (like service outages), but instead + indicate that something is fundamentally wrong with the Machine's + spec or the configuration of the controller, and that manual intervention + is required. 
Examples of terminal errors would be invalid combinations + of settings in the spec, values that are unsupported by the controller, + or the responsible controller itself being critically misconfigured. + \n Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output." + type: string + lastOperation: + description: LastOperation describes the last-operation performed by + the machine-controller. This API should be useful as a history in + terms of the latest operation performed on the specific machine. It + should also convey the state of the latest-operation for example if + it is still on-going, failed or completed successfully. + properties: + description: + description: Description is the human-readable description of the + last operation. + type: string + lastUpdated: + description: LastUpdated is the timestamp at which LastOperation + API was last-updated. + format: date-time + type: string + state: + description: State is the current status of the last performed operation. + E.g. Processing, Failed, Successful etc + type: string + type: + description: Type is the type of operation which was last performed. + E.g. Create, Delete, Update etc + type: string + type: object + lastUpdated: + description: LastUpdated identifies when this status was last observed. + format: date-time + type: string + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + phase: + description: Phase represents the current phase of machine actuation. + E.g. Pending, Running, Terminating, Failed etc. + type: string + providerStatus: + description: ProviderStatus details a Provider-specific status. It is + recommended that providers maintain their own versioned API types + that should be serialized/deserialized from this field. 
+ type: object + versions: + description: "Versions specifies the current versions of software on + the corresponding Node (if it exists). This is provided for a few + reasons: \n 1) It is more convenient than checking the NodeRef, traversing + it to the Node, and finding the appropriate field in Node.Status.NodeInfo + \ (which uses different field names and formatting). 2) It removes + some of the dependency on the structure of the Node, so that if + the structure of Node.Status.NodeInfo changes, only machine controllers + need to be updated, rather than every client of the Machines API. + 3) There is no other simple way to check the control plane version. + A client would have to connect directly to the apiserver running + on the target node in order to find out its version." + properties: + controlPlane: + description: ControlPlane is the semantic version of the Kubernetes + control plane to run. This should only be populated when the machine + is a control plane. + type: string + kubelet: + description: Kubelet is the semantic version of kubelet to run + type: string + required: + - kubelet + type: object + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinesets.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinesets.yaml new file mode 100644 index 0000000000..2020238c5c --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster.k8s.io_machinesets.yaml @@ -0,0 +1,557 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: machinesets.cluster.k8s.io +spec: + group: cluster.k8s.io + names: + kind: MachineSet + plural: machinesets + shortNames: + - ms + scope: Namespaced + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + validation: + openAPIV3Schema: + description: / [MachineSet] MachineSet ensures that a specified number of machines + replicas are running at any given time. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: / [MachineSetSpec] MachineSetSpec defines the desired state + of MachineSet + properties: + deletePolicy: + description: DeletePolicy defines the policy used to identify nodes + to delete when downscaling. Defaults to "Random". Valid values are + "Random, "Newest", "Oldest" + enum: + - Random + - Newest + - Oldest + type: string + minReadySeconds: + description: MinReadySeconds is the minimum number of seconds for which + a newly created machine should be ready. 
Defaults to 0 (machine will + be considered available as soon as it is ready) + format: int32 + type: integer + replicas: + description: Replicas is the number of desired replicas. This is a pointer + to distinguish between explicit zero and unspecified. Defaults to + 1. + format: int32 + type: integer + selector: + description: 'Selector is a label query over machines that should match + the replica count. Label keys and values that must match in order + to be controlled by this MachineSet. It must match the machine template''s + labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + template: + description: Template is the object that describes the machine that + will be created if insufficient replicas are detected. + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store + and retrieve arbitrary metadata. They are not queryable and + should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by the + server, to generate a unique name ONLY IF the Name field has + not been provided. If this field is used, the name returned + to the client will be different than the name passed. This + value will also be combined with a unique suffix. The provided + value has the same validation rules as the Name field, and + may be truncated by the length of the suffix required to make + the value unique on the server. \n If this field is specified + and the generated name exists, the server will NOT return + a 409 - instead, it will either return 201 Created or 500 + with Reason ServerTimeout indicating a unique name could not + be found in the time allotted, and the client should retry + (optionally after the time indicated in the Retry-After header). + \n Applied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used + to organize and categorize (scope and select) objects. May + match selectors of replication controllers and services. More + info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must + be unique. An empty namespace is equivalent to the \"default\" + namespace, but \"default\" is the canonical representation. + Not all objects are required to be scoped to a namespace - + the value of this field for those objects will be empty. \n + Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL + objects in the list have been deleted, this object will be + garbage collected. If this object is managed by a controller, + then an entry in this list will point to this controller, + with the controller field set to true. There cannot be more + than one managing controller. + items: + description: OwnerReference contains enough information to + let you identify an owning object. An owning object must + be in the same namespace as the dependent, or be cluster-scoped, + so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from the + key-value store until this reference is removed. Defaults + to false. To set this field, a user needs "delete" permission + of the owner, otherwise 422 (Unprocessable Entity) will + be returned. + type: boolean + controller: + description: If true, this reference points to the managing + controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + spec: + description: 'Specification of the desired behavior of the machine. + More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' + properties: + configSource: + description: ConfigSource is used to populate in the associated + Node for dynamic kubelet config. This field already exists + in Node, so any updates to it in the Machine spec will be + automatically copied to the linked NodeRef from the status. + The rest of dynamic kubelet config support should then work + as-is. 
+ properties: + configMap: + description: ConfigMap is a reference to a Node's ConfigMap + properties: + kubeletConfigKey: + description: KubeletConfigKey declares which key of + the referenced ConfigMap corresponds to the KubeletConfiguration + structure This field is required in all cases. + type: string + name: + description: Name is the metadata.name of the referenced + ConfigMap. This field is required in all cases. + type: string + namespace: + description: Namespace is the metadata.namespace of + the referenced ConfigMap. This field is required in + all cases. + type: string + resourceVersion: + description: ResourceVersion is the metadata.ResourceVersion + of the referenced ConfigMap. This field is forbidden + in Node.Spec, and required in Node.Status. + type: string + uid: + description: UID is the metadata.UID of the referenced + ConfigMap. This field is forbidden in Node.Spec, and + required in Node.Status. + type: string + required: + - kubeletConfigKey + - name + - namespace + type: object + type: object + metadata: + description: ObjectMeta will autopopulate the Node created. + Use this to indicate what labels, annotations, name prefix, + etc., should be used when creating the Node. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map + stored with a resource that may be set by external tools + to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + generateName: + description: "GenerateName is an optional prefix, used by + the server, to generate a unique name ONLY IF the Name + field has not been provided. If this field is used, the + name returned to the client will be different than the + name passed. This value will also be combined with a unique + suffix. The provided value has the same validation rules + as the Name field, and may be truncated by the length + of the suffix required to make the value unique on the + server. \n If this field is specified and the generated + name exists, the server will NOT return a 409 - instead, + it will either return 201 Created or 500 with Reason ServerTimeout + indicating a unique name could not be found in the time + allotted, and the client should retry (optionally after + the time indicated in the Retry-After header). \n Applied + only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" + type: string + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is + required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name + must be unique. An empty namespace is equivalent to the + \"default\" namespace, but \"default\" is the canonical + representation. 
Not all objects are required to be scoped + to a namespace - the value of this field for those objects + will be empty. \n Must be a DNS_LABEL. Cannot be updated. + More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If + ALL objects in the list have been deleted, this object + will be garbage collected. If this object is managed by + a controller, then an entry in this list will point to + this controller, with the controller field set to true. + There cannot be more than one managing controller. + items: + description: OwnerReference contains enough information + to let you identify an owning object. An owning object + must be in the same namespace as the dependent, or be + cluster-scoped, so there is no namespace field. + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" + finalizer, then the owner cannot be deleted from + the key-value store until this reference is removed. + Defaults to false. To set this field, a user needs + "delete" permission of the owner, otherwise 422 + (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the + managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + required: + - apiVersion + - kind + - name + - uid + type: object + type: array + type: object + providerID: + description: ProviderID is the identification ID of the machine + provided by the provider. This field must match the provider + ID as seen on the node object corresponding to this machine. + This field is required by higher level consumers of cluster-api. + Example use case is cluster autoscaler with cluster-api as + provider. Clean-up logic in the autoscaler compares machines + to nodes to find out machines at provider which could not + get registered as Kubernetes nodes. With cluster-api as a + generic out-of-tree provider for autoscaler, this field is + required by autoscaler to be able to have a provider view + of the list of machines. Another list of nodes is queried + from the k8s apiserver and then a comparison is done to find + out unregistered machines and are marked for delete. This + field will be set by the actuators and consumed by higher + level entities like autoscaler that will be interfacing with + cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration + to use during node creation. + properties: + value: + description: Value is an inlined, serialized representation + of the resource configuration. It is recommended that + providers maintain their own versioned API types that + should be serialized/deserialized from this field, akin + to component config. + type: object + valueFrom: + description: Source for the provider configuration. Cannot + be used if value is not empty. + properties: + machineClass: + description: The machine class from which the provider + config should be sourced. 
+ properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object + instead of an entire object, this string should + contain a valid JSON/Go field access statement, + such as desiredState.manifest.containers[2]. For + example, if the object reference is to a container + within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to + the name of the container that triggered the event) + or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax + is chosen only to have some well-defined way of + referencing a part of an object. TODO: this design + is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + provider: + description: Provider is the name of the cloud-provider + which MachineClass is intended for. + type: string + resourceVersion: + description: 'Specific resourceVersion to which + this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + type: object + type: object + taints: + description: The list of the taints to be applied to the corresponding + Node in additive manner. This list will not overwrite any + other taints added to the Node on an ongoing basis by other + entities. These taints should be actively reconciled e.g. + if you ask the machine controller to apply a taint and then + manually remove the taint the machine controller will put + it back) but not have the machine controller remove any taints + items: + description: The node this Taint is attached to has the "effect" + on any pod that does not tolerate the Taint. + properties: + effect: + description: Required. The effect of the taint on pods + that do not tolerate the taint. Valid effects are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to + a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the + taint was added. It is only written for NoExecute taints. + format: date-time + type: string + value: + description: Required. The taint value corresponding to + the taint key. + type: string + required: + - effect + - key + type: object + type: array + versions: + description: Versions of key software to use. This field is + optional at cluster creation time, and omitting the field + indicates that the cluster installation tool should select + defaults for the user. These defaults may differ based on + the cluster installer, but the tool should populate the values + it uses when persisting Machine objects. A Machine spec missing + this field at runtime is invalid. + properties: + controlPlane: + description: ControlPlane is the semantic version of the + Kubernetes control plane to run. 
This should only be populated + when the machine is a control plane. + type: string + kubelet: + description: Kubelet is the semantic version of kubelet + to run + type: string + required: + - kubelet + type: object + type: object + type: object + required: + - selector + type: object + status: + description: / [MachineSetStatus] MachineSetStatus defines the observed + state of MachineSet + properties: + availableReplicas: + description: The number of available replicas (ready for at least minReadySeconds) + for this MachineSet. + format: int32 + type: integer + errorMessage: + type: string + errorReason: + description: "In the event that there is a terminal problem reconciling + the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason + will be populated with a succinct value suitable for machine interpretation, + while ErrorMessage will contain a more verbose string suitable for + logging and human consumption. \n These fields should not be set for + transitive errors that a controller faces that are expected to be + fixed automatically over time (like service outages), but instead + indicate that something is fundamentally wrong with the MachineTemplate's + spec or the configuration of the machine controller, and that manual + intervention is required. Examples of terminal errors would be invalid + combinations of settings in the spec, values that are unsupported + by the machine controller, or the responsible machine controller itself + being critically misconfigured. \n Any transient errors that occur + during the reconciliation of Machines can be added as events to the + MachineSet object and/or logged in the controller's output." + type: string + fullyLabeledReplicas: + description: The number of replicas that have labels matching the labels + of the machine template of the MachineSet. + format: int32 + type: integer + observedGeneration: + description: ObservedGeneration reflects the generation of the most + recently observed MachineSet. + format: int64 + type: integer + readyReplicas: + description: The number of ready replicas for this MachineSet. A machine + is considered ready when the node has been created and is "Ready". + format: int32 + type: integer + replicas: + description: Replicas is the most recently observed number of replicas. 
+ format: int32 + type: integer + required: + - replicas + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machine.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machine.yaml deleted file mode 100644 index 61fd671cb0..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machine.yaml +++ /dev/null @@ -1,253 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: machines.cluster.k8s.io -spec: - additionalPrinterColumns: - - JSONPath: .spec.providerID - description: Provider ID - name: ProviderID - type: string - - JSONPath: .status.phase - description: Machine status such as Terminating/Pending/Running/Failed etc - name: Phase - type: string - - JSONPath: .status.nodeRef.name - description: Node name associated with this machine - name: NodeName - priority: 1 - type: string - group: cluster.k8s.io - names: - kind: Machine - plural: machines - shortNames: - - ma - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - configSource: - description: ConfigSource is used to populate in the associated Node - for dynamic kubelet config. This field already exists in Node, so - any updates to it in the Machine spec will be automatically copied - to the linked NodeRef from the status. The rest of dynamic kubelet - config support should then work as-is. - type: object - metadata: - description: ObjectMeta will autopopulate the Node created. Use this - to indicate what labels, annotations, name prefix, etc., should be - used when creating the Node. - type: object - providerID: - description: ProviderID is the identification ID of the machine provided - by the provider. This field must match the provider ID as seen on - the node object corresponding to this machine. This field is required - by higher level consumers of cluster-api. Example use case is cluster - autoscaler with cluster-api as provider. Clean-up logic in the autoscaler - compares machines to nodes to find out machines at provider which - could not get registered as Kubernetes nodes. With cluster-api as - a generic out-of-tree provider for autoscaler, this field is required - by autoscaler to be able to have a provider view of the list of machines. - Another list of nodes is queried from the k8s apiserver and then a - comparison is done to find out unregistered machines and are marked - for delete. 
This field will be set by the actuators and consumed by - higher level entities like autoscaler that will be interfacing with - cluster-api as generic provider. - type: string - providerSpec: - description: ProviderSpec details Provider-specific configuration to - use during node creation. - properties: - value: - description: Value is an inlined, serialized representation of the - resource configuration. It is recommended that providers maintain - their own versioned API types that should be serialized/deserialized - from this field, akin to component config. - type: object - valueFrom: - description: Source for the provider configuration. Cannot be used - if value is not empty. - properties: - machineClass: - description: The machine class from which the provider config - should be sourced. - properties: - provider: - description: Provider is the name of the cloud-provider - which MachineClass is intended for. - type: string - type: object - type: object - type: object - taints: - description: The list of the taints to be applied to the corresponding - Node in additive manner. This list will not overwrite any other taints - added to the Node on an ongoing basis by other entities. These taints - should be actively reconciled e.g. if you ask the machine controller - to apply a taint and then manually remove the taint the machine controller - will put it back) but not have the machine controller remove any taints - items: - type: object - type: array - versions: - description: Versions of key software to use. This field is optional - at cluster creation time, and omitting the field indicates that the - cluster installation tool should select defaults for the user. These - defaults may differ based on the cluster installer, but the tool should - populate the values it uses when persisting Machine objects. A Machine - spec missing this field at runtime is invalid. - properties: - controlPlane: - description: ControlPlane is the semantic version of the Kubernetes - control plane to run. This should only be populated when the machine - is a control plane. - type: string - kubelet: - description: Kubelet is the semantic version of kubelet to run - type: string - required: - - kubelet - type: object - required: - - providerSpec - type: object - status: - properties: - addresses: - description: Addresses is a list of addresses assigned to the machine. - Queried from cloud provider, if available. - items: - type: object - type: array - conditions: - description: 'Conditions lists the conditions synced from the node conditions - of the corresponding node-object. Machine-controller is responsible - for keeping conditions up-to-date. MachineSet controller will be taking - these conditions as a signal to decide if machine is healthy or needs - to be replaced. Refer: https://kubernetes.io/docs/concepts/architecture/nodes/#condition' - items: - type: object - type: array - errorMessage: - description: ErrorMessage will be set in the event that there is a terminal - problem reconciling the Machine and will contain a more verbose string - suitable for logging and human consumption. This field should not - be set for transitive errors that a controller faces that are expected - to be fixed automatically over time (like service outages), but instead - indicate that something is fundamentally wrong with the Machine's - spec or the configuration of the controller, and that manual intervention - is required. 
Examples of terminal errors would be invalid combinations - of settings in the spec, values that are unsupported by the controller, - or the responsible controller itself being critically misconfigured. Any - transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - type: string - errorReason: - description: ErrorReason will be set in the event that there is a terminal - problem reconciling the Machine and will contain a succinct value - suitable for machine interpretation. This field should not be set - for transitive errors that a controller faces that are expected to - be fixed automatically over time (like service outages), but instead - indicate that something is fundamentally wrong with the Machine's - spec or the configuration of the controller, and that manual intervention - is required. Examples of terminal errors would be invalid combinations - of settings in the spec, values that are unsupported by the controller, - or the responsible controller itself being critically misconfigured. Any - transient errors that occur during the reconciliation of Machines - can be added as events to the Machine object and/or logged in the - controller's output. - type: string - lastOperation: - description: LastOperation describes the last-operation performed by - the machine-controller. This API should be useful as a history in - terms of the latest operation performed on the specific machine. It - should also convey the state of the latest-operation for example if - it is still on-going, failed or completed successfully. - properties: - description: - description: Description is the human-readable description of the - last operation. - type: string - lastUpdated: - description: LastUpdated is the timestamp at which LastOperation - API was last-updated. - format: date-time - type: string - state: - description: State is the current status of the last performed operation. - E.g. Processing, Failed, Successful etc - type: string - type: - description: Type is the type of operation which was last performed. - E.g. Create, Delete, Update etc - type: string - type: object - lastUpdated: - description: LastUpdated identifies when this status was last observed. - format: date-time - type: string - nodeRef: - description: NodeRef will point to the corresponding Node if it exists. - type: object - phase: - description: Phase represents the current phase of machine actuation. - E.g. Pending, Running, Terminating, Failed etc. - type: string - providerStatus: - description: ProviderStatus details a Provider-specific status. It is - recommended that providers maintain their own versioned API types - that should be serialized/deserialized from this field. - type: object - versions: - description: 'Versions specifies the current versions of software on - the corresponding Node (if it exists). This is provided for a few - reasons: 1) It is more convenient than checking the NodeRef, traversing - it to the Node, and finding the appropriate field in Node.Status.NodeInfo (which - uses different field names and formatting). 2) It removes some of - the dependency on the structure of the Node, so that if the structure - of Node.Status.NodeInfo changes, only machine controllers need - to be updated, rather than every client of the Machines API. 3) - There is no other simple way to check the control plane version. 
- A client would have to connect directly to the apiserver running - on the target node in order to find out its version.' - properties: - controlPlane: - description: ControlPlane is the semantic version of the Kubernetes - control plane to run. This should only be populated when the machine - is a control plane. - type: string - kubelet: - description: Kubelet is the semantic version of kubelet to run - type: string - required: - - kubelet - type: object - type: object - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml deleted file mode 100644 index d7da82b527..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml +++ /dev/null @@ -1,265 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: machinedeployments.cluster.k8s.io -spec: - group: cluster.k8s.io - names: - kind: MachineDeployment - plural: machinedeployments - shortNames: - - md - scope: Namespaced - subresources: - scale: - labelSelectorPath: .status.labelSelector - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - minReadySeconds: - description: Minimum number of seconds for which a newly created machine - should be ready. Defaults to 0 (machine will be considered available - as soon as it is ready) - format: int32 - type: integer - paused: - description: Indicates that the deployment is paused. - type: boolean - progressDeadlineSeconds: - description: The maximum time in seconds for a deployment to make progress - before it is considered to be failed. The deployment controller will - continue to process failed deployments and a condition with a ProgressDeadlineExceeded - reason will be surfaced in the deployment status. Note that progress - will not be estimated during the time a deployment is paused. Defaults - to 600s. - format: int32 - type: integer - replicas: - description: Number of desired machines. Defaults to 1. This is a pointer - to distinguish between explicit zero and not specified. - format: int32 - type: integer - revisionHistoryLimit: - description: The number of old MachineSets to retain to allow rollback. - This is a pointer to distinguish between explicit zero and not specified. - Defaults to 1. - format: int32 - type: integer - selector: - description: Label selector for machines. Existing MachineSets whose - machines are selected by this will be the ones affected by this deployment. - It must match the machine template's labels. 
- type: object - strategy: - description: The deployment strategy to use to replace existing machines - with new ones. - properties: - rollingUpdate: - description: Rolling update config params. Present only if MachineDeploymentStrategyType - = RollingUpdate. - properties: - maxSurge: - anyOf: - - type: string - - type: integer - description: 'The maximum number of machines that can be scheduled - above the desired number of machines. Value can be an absolute - number (ex: 5) or a percentage of desired machines (ex: 10%). - This can not be 0 if MaxUnavailable is 0. Absolute number - is calculated from percentage by rounding up. Defaults to - 1. Example: when this is set to 30%, the new MachineSet can - be scaled up immediately when the rolling update starts, such - that the total number of old and new machines do not exceed - 130% of desired machines. Once old machines have been killed, - new MachineSet can be scaled up further, ensuring that total - number of machines running at any time during the update is - at most 130% of desired machines.' - maxUnavailable: - anyOf: - - type: string - - type: integer - description: 'The maximum number of machines that can be unavailable - during the update. Value can be an absolute number (ex: 5) - or a percentage of desired machines (ex: 10%). Absolute number - is calculated from percentage by rounding down. This can not - be 0 if MaxSurge is 0. Defaults to 0. Example: when this is - set to 30%, the old MachineSet can be scaled down to 70% of - desired machines immediately when the rolling update starts. - Once new machines are ready, old MachineSet can be scaled - down further, followed by scaling up the new MachineSet, ensuring - that the total number of machines available at all times during - the update is at least 70% of desired machines.' - type: object - type: - description: Type of deployment. Currently the only supported strategy - is "RollingUpdate". Default is RollingUpdate. - type: string - type: object - template: - description: Template describes the machines that will be created. - properties: - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - type: object - spec: - description: 'Specification of the desired behavior of the machine. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - properties: - configSource: - description: ConfigSource is used to populate in the associated - Node for dynamic kubelet config. This field already exists - in Node, so any updates to it in the Machine spec will be - automatically copied to the linked NodeRef from the status. - The rest of dynamic kubelet config support should then work - as-is. - type: object - metadata: - description: ObjectMeta will autopopulate the Node created. - Use this to indicate what labels, annotations, name prefix, - etc., should be used when creating the Node. - type: object - providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api as - provider. Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. 
With cluster-api as a - generic out-of-tree provider for autoscaler, this field is - required by autoscaler to be able to have a provider view - of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to find - out unregistered machines and are marked for delete. This - field will be set by the actuators and consumed by higher - level entities like autoscaler that will be interfacing with - cluster-api as generic provider. - type: string - providerSpec: - description: ProviderSpec details Provider-specific configuration - to use during node creation. - properties: - value: - description: Value is an inlined, serialized representation - of the resource configuration. It is recommended that - providers maintain their own versioned API types that - should be serialized/deserialized from this field, akin - to component config. - type: object - valueFrom: - description: Source for the provider configuration. Cannot - be used if value is not empty. - properties: - machineClass: - description: The machine class from which the provider - config should be sourced. - properties: - provider: - description: Provider is the name of the cloud-provider - which MachineClass is intended for. - type: string - type: object - type: object - type: object - taints: - description: The list of the taints to be applied to the corresponding - Node in additive manner. This list will not overwrite any - other taints added to the Node on an ongoing basis by other - entities. These taints should be actively reconciled e.g. - if you ask the machine controller to apply a taint and then - manually remove the taint the machine controller will put - it back) but not have the machine controller remove any taints - items: - type: object - type: array - versions: - description: Versions of key software to use. This field is - optional at cluster creation time, and omitting the field - indicates that the cluster installation tool should select - defaults for the user. These defaults may differ based on - the cluster installer, but the tool should populate the values - it uses when persisting Machine objects. A Machine spec missing - this field at runtime is invalid. - properties: - controlPlane: - description: ControlPlane is the semantic version of the - Kubernetes control plane to run. This should only be populated - when the machine is a control plane. - type: string - kubelet: - description: Kubelet is the semantic version of kubelet - to run - type: string - required: - - kubelet - type: object - required: - - providerSpec - type: object - type: object - required: - - selector - - template - type: object - status: - properties: - availableReplicas: - description: Total number of available machines (ready for at least - minReadySeconds) targeted by this deployment. - format: int32 - type: integer - observedGeneration: - description: The generation observed by the deployment controller. - format: int64 - type: integer - readyReplicas: - description: Total number of ready machines targeted by this deployment. - format: int32 - type: integer - replicas: - description: Total number of non-terminated machines targeted by this - deployment (their labels match the selector). - format: int32 - type: integer - unavailableReplicas: - description: Total number of unavailable machines targeted by this deployment. - This is the total number of machines that are still required for the - deployment to have 100% available capacity. 
They may either be machines - that are running but not yet available or machines that still have - not been created. - format: int32 - type: integer - updatedReplicas: - description: Total number of non-terminated machines targeted by this - deployment that have the desired template spec. - format: int32 - type: integer - type: object - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml deleted file mode 100644 index 29b5e8d91a..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machineset.yaml +++ /dev/null @@ -1,227 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - controller-tools.k8s.io: "1.0" - name: machinesets.cluster.k8s.io -spec: - group: cluster.k8s.io - names: - kind: MachineSet - plural: machinesets - shortNames: - - ms - scope: Namespaced - subresources: - scale: - labelSelectorPath: .status.labelSelector - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - deletePolicy: - description: DeletePolicy defines the policy used to identify nodes - to delete when downscaling. Defaults to "Random". Valid values are - "Random, "Newest", "Oldest" - enum: - - Random - - Newest - - Oldest - type: string - minReadySeconds: - description: MinReadySeconds is the minimum number of seconds for which - a newly created machine should be ready. Defaults to 0 (machine will - be considered available as soon as it is ready) - format: int32 - type: integer - replicas: - description: Replicas is the number of desired replicas. This is a pointer - to distinguish between explicit zero and unspecified. Defaults to - 1. - format: int32 - type: integer - selector: - description: 'Selector is a label query over machines that should match - the replica count. Label keys and values that must match in order - to be controlled by this MachineSet. It must match the machine template''s - labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' - type: object - template: - description: Template is the object that describes the machine that - will be created if insufficient replicas are detected. - properties: - metadata: - description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' - type: object - spec: - description: 'Specification of the desired behavior of the machine. 
- More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - properties: - configSource: - description: ConfigSource is used to populate in the associated - Node for dynamic kubelet config. This field already exists - in Node, so any updates to it in the Machine spec will be - automatically copied to the linked NodeRef from the status. - The rest of dynamic kubelet config support should then work - as-is. - type: object - metadata: - description: ObjectMeta will autopopulate the Node created. - Use this to indicate what labels, annotations, name prefix, - etc., should be used when creating the Node. - type: object - providerID: - description: ProviderID is the identification ID of the machine - provided by the provider. This field must match the provider - ID as seen on the node object corresponding to this machine. - This field is required by higher level consumers of cluster-api. - Example use case is cluster autoscaler with cluster-api as - provider. Clean-up logic in the autoscaler compares machines - to nodes to find out machines at provider which could not - get registered as Kubernetes nodes. With cluster-api as a - generic out-of-tree provider for autoscaler, this field is - required by autoscaler to be able to have a provider view - of the list of machines. Another list of nodes is queried - from the k8s apiserver and then a comparison is done to find - out unregistered machines and are marked for delete. This - field will be set by the actuators and consumed by higher - level entities like autoscaler that will be interfacing with - cluster-api as generic provider. - type: string - providerSpec: - description: ProviderSpec details Provider-specific configuration - to use during node creation. - properties: - value: - description: Value is an inlined, serialized representation - of the resource configuration. It is recommended that - providers maintain their own versioned API types that - should be serialized/deserialized from this field, akin - to component config. - type: object - valueFrom: - description: Source for the provider configuration. Cannot - be used if value is not empty. - properties: - machineClass: - description: The machine class from which the provider - config should be sourced. - properties: - provider: - description: Provider is the name of the cloud-provider - which MachineClass is intended for. - type: string - type: object - type: object - type: object - taints: - description: The list of the taints to be applied to the corresponding - Node in additive manner. This list will not overwrite any - other taints added to the Node on an ongoing basis by other - entities. These taints should be actively reconciled e.g. - if you ask the machine controller to apply a taint and then - manually remove the taint the machine controller will put - it back) but not have the machine controller remove any taints - items: - type: object - type: array - versions: - description: Versions of key software to use. This field is - optional at cluster creation time, and omitting the field - indicates that the cluster installation tool should select - defaults for the user. These defaults may differ based on - the cluster installer, but the tool should populate the values - it uses when persisting Machine objects. A Machine spec missing - this field at runtime is invalid. - properties: - controlPlane: - description: ControlPlane is the semantic version of the - Kubernetes control plane to run. 
This should only be populated - when the machine is a control plane. - type: string - kubelet: - description: Kubelet is the semantic version of kubelet - to run - type: string - required: - - kubelet - type: object - required: - - providerSpec - type: object - type: object - required: - - selector - type: object - status: - properties: - availableReplicas: - description: The number of available replicas (ready for at least minReadySeconds) - for this MachineSet. - format: int32 - type: integer - errorMessage: - type: string - errorReason: - description: In the event that there is a terminal problem reconciling - the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason - will be populated with a succinct value suitable for machine interpretation, - while ErrorMessage will contain a more verbose string suitable for - logging and human consumption. These fields should not be set for - transitive errors that a controller faces that are expected to be - fixed automatically over time (like service outages), but instead - indicate that something is fundamentally wrong with the MachineTemplate's - spec or the configuration of the machine controller, and that manual - intervention is required. Examples of terminal errors would be invalid - combinations of settings in the spec, values that are unsupported - by the machine controller, or the responsible machine controller itself - being critically misconfigured. Any transient errors that occur during - the reconciliation of Machines can be added as events to the MachineSet - object and/or logged in the controller's output. - type: string - fullyLabeledReplicas: - description: The number of replicas that have labels matching the labels - of the machine template of the MachineSet. - format: int32 - type: integer - observedGeneration: - description: ObservedGeneration reflects the generation of the most - recently observed MachineSet. - format: int64 - type: integer - readyReplicas: - description: The number of ready replicas for this MachineSet. A machine - is considered ready when the node has been created and is "Ready". - format: int32 - type: integer - replicas: - description: Replicas is the most recently observed number of replicas. - format: int32 - type: integer - required: - - replicas - type: object - version: v1alpha1 -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/kustomization.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/kustomization.yaml index d46ad691d2..0d91d678ad 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/kustomization.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/kustomization.yaml @@ -4,9 +4,9 @@ # YAML string, with resources separated by document # markers ("---"). 
resources: -- cluster_v1alpha1_cluster.yaml -- cluster_v1alpha1_machine.yaml -- cluster_v1alpha1_machineclass.yaml -- cluster_v1alpha1_machinedeployment.yaml -- cluster_v1alpha1_machineset.yaml +- cluster.k8s.io_clusters.yaml +- cluster.k8s.io_machines.yaml +- cluster.k8s.io_machinesets.yaml +- cluster.k8s.io_machineclasses.yaml +- cluster.k8s.io_machinedeployments.yaml diff --git a/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml b/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml index 9ff239f49f..45c43cb3e7 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/default/kustomization.yaml @@ -9,9 +9,51 @@ namespace: cluster-api-system namePrefix: cluster-api- bases: -- ../crds/ -- ../rbac/ -- ../manager/ +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager patchesStrategicMerge: - manager_image_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: certmanager.k8s.io +# version: v1alpha1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: certmanager.k8s.io +# version: v1alpha1 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml b/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml index 5405548e2a..85281216e0 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/default/manager_image_patch.yaml @@ -7,5 +7,5 @@ spec: template: spec: containers: - - image: us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:v0.1.9 + - image: gcr.io/k8s-staging-cluster-api/cluster-api-controller:latest name: manager diff --git a/vendor/sigs.k8s.io/cluster-api/config/rbac/kustomization.yaml b/vendor/sigs.k8s.io/cluster-api/config/rbac/kustomization.yaml index 984ec5c183..d2847dbbf0 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/rbac/kustomization.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/rbac/kustomization.yaml @@ -4,6 +4,6 @@ # YAML string, with resources separated by document # markers ("---"). 
resources: -- manager_role_binding.yaml -- manager_role.yaml +- role_binding.yaml +- role.yaml diff --git a/config/rbac/rbac_role.yaml b/vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml similarity index 71% rename from config/rbac/rbac_role.yaml rename to vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml index 7a014515e3..465421b1ea 100644 --- a/config/rbac/rbac_role.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/rbac/role.yaml @@ -1,57 +1,70 @@ + +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: creationTimestamp: null - name: openstack-provider-manager-role + name: manager-role rules: - apiGroups: - - openstackproviderconfig.k8s.io + - bootstrap.cluster.x-k8s.io + - infrastructure.cluster.x-k8s.io resources: - - openstackmachineproviderconfigs + - '*' verbs: + - create + - delete - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - - cluster.k8s.io + - cluster.x-k8s.io resources: - clusters - clusters/status - machinedeployments - - machinesets + - machinedeployments/status - machines - machines/status + - machinesets + - machinesets/status verbs: + - create + - delete - get - list - - watch - - create - - update - patch - - delete + - update + - watch - apiGroups: - "" resources: - - nodes - events verbs: + - create - get - list + - patch - watch +- apiGroups: + - "" + resources: + - nodes + verbs: - create - - update - - patch - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: - secrets verbs: - - create - get - list - watch diff --git a/vendor/sigs.k8s.io/cluster-api/config/rbac/manager_role_binding.yaml b/vendor/sigs.k8s.io/cluster-api/config/rbac/role_binding.yaml similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/config/rbac/manager_role_binding.yaml rename to vendor/sigs.k8s.io/cluster-api/config/rbac/role_binding.yaml diff --git a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_cluster.yaml b/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_cluster.yaml deleted file mode 100644 index fc796aa6b5..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_cluster.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: cluster.k8s.io/v1alpha1 -kind: Cluster -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: cluster-sample -spec: - # Add fields here - foo: bar diff --git a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machine.yaml b/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machine.yaml deleted file mode 100644 index 9b37401789..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machine.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: cluster.k8s.io/v1alpha1 -kind: Machine -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: machine-sample -spec: - # Add fields here - foo: bar diff --git a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machinedeployment.yaml b/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machinedeployment.yaml deleted file mode 100644 index d2f072342d..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machinedeployment.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: cluster.k8s.io/v1alpha1 -kind: MachineDeployment -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: machinedeployment-sample -spec: - replicas: 3 - selector: - matchLabels: - # Add fields here - foo: bar - template: - metadata: - labels: - foo: bar \ No newline at end of file 
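The rbac kustomization above now consumes the renamed `role.yaml`/`role_binding.yaml` pair, and the upstream `manager_role_binding.yaml` is carried over unchanged under its new name (100% rename), so its contents are not shown in this patch. For reference only — a hedged sketch, not the binding shipped here — a ClusterRoleBinding that grants the renamed `manager-role` ClusterRole to the controller's service account typically looks like:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: manager-rolebinding          # illustrative name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: manager-role                 # matches the renamed role.yaml above
subjects:
- kind: ServiceAccount
  name: default                      # assumed service account
  namespace: system                  # assumed namespace; kustomize name prefixes/namespaces still apply
```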
diff --git a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machineset.yaml b/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machineset.yaml deleted file mode 100644 index 929af08698..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/config/samples/cluster_v1alpha1_machineset.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: cluster.k8s.io/v1alpha1 -kind: MachineSet -metadata: - labels: - controller-tools.k8s.io: "1.0" - name: machineset-sample -spec: - replicas: 3 - selector: - matchLabels: - # Add fields here - foo: bar - template: - metadata: - labels: - foo: bar diff --git a/vendor/sigs.k8s.io/cluster-api/config/webhook/manifests.yaml b/vendor/sigs.k8s.io/cluster-api/config/webhook/manifests.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/controllers/BUILD.bazel new file mode 100644 index 0000000000..6e62a383f3 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/controllers/BUILD.bazel @@ -0,0 +1,36 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "cluster_controller.go", + "machine_controller.go", + "machinedeployment_controller.go", + "machineset_controller.go", + ], + importpath = "sigs.k8s.io/cluster-api/controllers", + visibility = ["//visibility:public"], + deps = [ + "//api/v1alpha2:go_default_library", + "//vendor/github.com/go-logr/logr:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["suite_test.go"], + embed = [":go_default_library"], + deps = [ + "//api/v1alpha2:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/log:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/log/zap:go_default_library", + ], +) diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/cluster_controller.go b/vendor/sigs.k8s.io/cluster-api/controllers/cluster_controller.go new file mode 100644 index 0000000000..466196212b --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/controllers/cluster_controller.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" +) + +// ClusterReconciler reconciles a Cluster object +type ClusterReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters/status,verbs=get;update;patch + +func (r *ClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + _ = context.Background() + _ = r.Log.WithValues("cluster", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} + +func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&clusterv1alpha2.Cluster{}). + Complete(r) +} diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/machine_controller.go b/vendor/sigs.k8s.io/cluster-api/controllers/machine_controller.go new file mode 100644 index 0000000000..de1f5acbdc --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/controllers/machine_controller.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" +) + +// MachineReconciler reconciles a Machine object +type MachineReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines/status,verbs=get;update;patch + +func (r *MachineReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + _ = context.Background() + _ = r.Log.WithValues("machine", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} + +func (r *MachineReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&clusterv1alpha2.Machine{}). + Complete(r) +} diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/machinedeployment_controller.go b/vendor/sigs.k8s.io/cluster-api/controllers/machinedeployment_controller.go new file mode 100644 index 0000000000..1cca4e77a7 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/controllers/machinedeployment_controller.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" +) + +// MachineDeploymentReconciler reconciles a MachineDeployment object +type MachineDeploymentReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments/status,verbs=get;update;patch + +func (r *MachineDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + _ = context.Background() + _ = r.Log.WithValues("machinedeployment", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} + +func (r *MachineDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&clusterv1alpha2.MachineDeployment{}). + Complete(r) +} diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/machineset_controller.go b/vendor/sigs.k8s.io/cluster-api/controllers/machineset_controller.go new file mode 100644 index 0000000000..d3bde72d11 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/controllers/machineset_controller.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" +) + +// MachineSetReconciler reconciles a MachineSet object +type MachineSetReconciler struct { + client.Client + Log logr.Logger +} + +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinesets/status,verbs=get;update;patch + +func (r *MachineSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + _ = context.Background() + _ = r.Log.WithValues("machineset", req.NamespacedName) + + // your logic here + + return ctrl.Result{}, nil +} + +func (r *MachineSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&clusterv1alpha2.MachineSet{}). 
+ Complete(r) +} diff --git a/vendor/sigs.k8s.io/cluster-api/controllers/suite_test.go b/vendor/sigs.k8s.io/cluster-api/controllers/suite_test.go new file mode 100644 index 0000000000..620b92c259 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/controllers/suite_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{envtest.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = clusterv1alpha2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/ABBREVIATIONS.md b/vendor/sigs.k8s.io/cluster-api/docs/book/ABBREVIATIONS.md new file mode 100644 index 0000000000..9267f5415b --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/ABBREVIATIONS.md @@ -0,0 +1,10 @@ +# Acronyms and Abbreviations + +Acronym or Abbreviation | Full name +-------------------------|--------------------- +CAPA | Cluster API Provider AWS +CAPD | Cluster API Provider Docker +CAPI | Cluster API +CAPO | Cluster API Provider OpenStack +CAPV | Cluster API Provider vSphere +CAPZ | Cluster API Provider Azure diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/GLOSSARY.md b/vendor/sigs.k8s.io/cluster-api/docs/book/GLOSSARY.md index 4e0a3bdf78..2f20ee543b 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/GLOSSARY.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/GLOSSARY.md @@ -1,3 +1,181 @@ -## Solas -International Convention for the Safety of Life at Sea +--- +title: Glossary +authors: + - "@dhellmann" + - "@timothysc" 
+reviewers: + - "@timothysc" + - "@justinsb" + - "@detiber" + - "@davidewatson" + - "@vincepri" +creation-date: 2019-06-10 +last-updated: 2019-06-10 +--- + + +# Table of Contents + +[A](#a) | [B](#b) | [C](#c) | [D](#d) | [H](#h) | [I](#i) | [K](#k) | [M](#m) | [N](#n) | [O](#o) | [P](#p) | [S](#s) | [T](#t) | [W](#w) + +# A + +## Add-ons + +Services beyond the fundamental components of Kubernetes. + +* __Core Add-ons__: Addons that are required to deploy a Kubernetes-conformant cluster: DNS, kube-proxy, CNI. +* __Additional Add-ons__: Addons that are not required for a Kubernetes-conformant cluster (e.g. metrics/Heapster, Dashboard). + +# B + +## Bootstrap + +The process of turning a server into a Kubernetes node. This may involve assembling data to provide when creating the server that backs the Machine, as well as runtime configuration of the software running on that server. + +## Bootstrap cluster + +A temporary cluster that is used to provision a Target Management cluster. + +# C + +## Cluster + +A full Kubernetes deployment. See Management Cluster and Workload Cluster + +## Cluster API + +Or __Cluster API project__ + +The Cluster API sub-project of the SIG-cluster-lifecycle. It is also used to refer to the software components, APIs, and community that produce them. + +## Control plane + +The set of Kubernetes services that form the basis of a cluster. See also https://kubernetes.io/docs/concepts/#kubernetes-control-plane There are two variants: + +* __Self-provisioned__: A Kubernetes control plane consisting of pods or machines wholly managed by a single Cluster API deployment. +* __External__: A control plane offered and controlled by some system other than Cluster API (e.g., GKE, AKS, EKS, IKS). + +# D + +## Default implementation + +A feature implementation offered as part of the Cluster API project, infrastructure providers can swap it out for a different one. + +# H + +## Horizontal Scaling + +The ability to add more machines based on policy and well defined metrics. For example, add a machine to a cluster when CPU load average > (X) for a period of time (Y). + +## Host + +see [Server](#server) + +# I + +## Infrastructure provider + +A source of computational resources (e.g. machines, networking, etc.). Examples for cloud include AWS, Azure, Google, etc.; for bare metal include VMware, MAAS, metal3.io, etc. When there is more than one way to obtain resources from the same infrastructure provider (e.g. EC2 vs. EKS) each way is referred to as a variant. + +## Instance + +see [Server](#server) + +## Immutability + +A resource that does not mutate. In kubernetes we often state the instance of a running pod is immutable or does not change once it is run. In order to make a change, a new pod is run. In the context of [Cluster API](#cluster-api) we often refer to an running instance of a [Machine](#machine) is considered immutable, from a [Cluster API](#cluster-api) perspective. + +# K + +## Kubernetes-conformant + +Or __Kubernetes-compliant__ + +A cluster that passes the Kubernetes conformance tests. + +## K/K + +Refers to the [main Kubernetes git repository](https://github.com/kubernetes/kubernetes) or the main Kubernetes project. + +# M + +## Machine + +Or __Machine Resource__ + +The Custom Resource for Kubernetes that represents a request to have a place to run kubelet. + +See also: [Server](#server) + +## Manage a cluster + +Perform create, scale, upgrade, or destroy operations on the cluster. 
+ +## Management cluster + +The cluster where one or more Infrastructure Providers run, and where resources (e.g. Machines) are stored. Typically referred to when you are provisioning multiple clusters. + +# N + +## Node pools + +A node pool is a group of nodes within a cluster that all have the same configuration. + +# O + +## Operating system + +Or __OS__ + +A generically understood combination of a kernel and system-level userspace interface, such as Linux or Windows, as opposed to a particular distribution. + +# P + +## Pivot + +Pivot is a process for moving the provider components and declared cluster-api resources from a Source Management cluster to a Target Management cluster. + +The pivot process is also used for deleting a management cluster and could also be used during an upgrade of the management cluster. + +## Provider + +See [Infrastructure Provider](#user-content-infrastructure-provider) + +#### Provider implementation + +Existing Cluster API implementations consist of generic and infrastructure provider-specific logic. The [infrastructure provider](#infrastructure-provider)-specific logic is currently maintained in infrastructure provider repositories. + +# S + +## Scaling + +Unless otherwise specified, this refers to horizontal scaling. + +## Server + +The infrastructure that backs a [Machine Resource](#user-content-machine), typically either a cloud instance, virtual machine, or physical host. + +# T + +## Target Management cluster + +The declared cluster we intend to create and manage using cluster-api when running `clusterctl create cluster`. + +When running `clusterctl alpha phases pivot` this refers to the cluster that will be the new management cluster. + +# W + +## Workload cluster + +A cluster whose lifecycle is managed by the Management cluster. diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/README.md b/vendor/sigs.k8s.io/cluster-api/docs/book/README.md index 15a44e267b..a7988b70f2 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/README.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/README.md @@ -57,7 +57,7 @@ reliability. ## Resources -- GitBook: [kubernetes-sigs.github.io/cluster-api](https://kubernetes-sigs.github.io/cluster-api) +- GitBook: [cluster-api.sigs.k8s.io](https://cluster-api.sigs.k8s.io) - GitHub Repo: [kubernetes-sigs/cluster-api](https://github.com/kubernetes-sigs/cluster-api) - Slack channel: [#cluster-api](http://slack.k8s.io/#cluster-api) - Google Group: [kubernetes-sig-cluster-lifecycle@googlegroups.com](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/RELEASE b/vendor/sigs.k8s.io/cluster-api/docs/book/RELEASE index 8f1fc4a0ac..5f7a55b878 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/RELEASE +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/RELEASE @@ -12,10 +12,21 @@ content is generated into [./docs/book/_book](../../docs/book/_book) but not checked into version control. Netlify will automatically rebuild the documentation on each merge to master. +## Submit a PR + +## Testing with Netlify + +Each PR to the cluster-api repository causes mutliple CI stages to be run. One +of the stages is named `deploy/netlify` and by clicking on the `Details` link +from the PR you can browse the generated content before it is deployed live. + ## Testing locally +This has only been verified to work on OSX. For platform agnostic testing see +the [Testing with Netlify](#testing-with-netlify) section above. 
+ ``` -hack/build-gitbook.sh +make book ``` ## Verify the contents of the GitBook are correct by viewing the pages which @@ -25,13 +36,42 @@ should include new or changed content and: 1) They are rendered as expected. ``` -cd docs/book -gitbook serve -# open localhost:4000 in a web browser +make serve-book ``` ## Finally submit a PR +# Creating a new book + +Each major and minor release should get a new version of the book. Netlify is +already configured to deploy each branch to a Netlify site at +`https://--kubernetes-sigs-cluster-api.netlify.com`. +Note that dots in the branch name will be replaced by dashes in the domain +name. For example, `release-0-1--kubernetes-sigs-cluster-api.netlify.com` is +the domain name for the branch named `release-0.1`. It remains to: + +1) Create a subdomain for the branch. Subdomains are of the form +`.cluster-api.sigs.k8s.io`. To create one submit a PR +to the kubernetes/k8s.io repository similar to this one: +https://github.com/kubernetes/k8s.io/pull/324 + +2) Request for the Netlify SSL cert to be extended to the new subdomain. For +example: + +Submit a request [here](https://www.netlify.com/support/) with the following +information: + +Topic: SSL ISSUE +Site Name: kubernetes-sigs-cluster-api +Subject: Extend SSL certificate to new subdomain +How can we help you?: + +>I am at step 4) "Contact Netlify Technical Support to get the SSL certificate extended to the new sub-domain." of [how to use Netlify’s branch deploy feature without Netlify DNS](https://community.netlify.com/t/common-issue-how-to-use-netlify-s-branch-deploy-feature-without-netlify-dns/128). +> +>Can you please extend the SSL cert for the kubernetes-sigs-cluster-api site to the subdomain: .cluster-api.sigs.k8s.io? +> +>Thank you! + [markdown]: https://toolchain.gitbook.com/syntax/markdown.html [gitbook]: https://toolchain.gitbook.com/ [plugins]: https://toolchain.gitbook.com/plugins/ diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/SUMMARY.md b/vendor/sigs.k8s.io/cluster-api/docs/book/SUMMARY.md index d0cd5238e3..faa071317e 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/SUMMARY.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/SUMMARY.md @@ -4,6 +4,8 @@ ## Getting Started +* [Glossary](GLOSSARY.md) +* [Abbreviations](ABBREVIATIONS.md) * [Existing Providers](getting_started/existing_providers.md) ## Common Code @@ -14,6 +16,7 @@ * [Machine Controller](common_code/machine_controller.md) * [MachineSet Controller](common_code/machineset_controller.md) * [MachineDeployment Controller](common_code/machinedeployment_controller.md) +* [NodeRef Controller](common_code/noderef_controller.md) * [Node Controller](common_code/node_controller.md) ## Creating a New Provider diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/architecture.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/architecture.md index 658ad54d23..055c6cbc62 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/architecture.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/architecture.md @@ -4,8 +4,8 @@ It may be useful to read at least the following chapters of the Kubebuilder book in order to better understand this section. 
-- [What is a Resource](https://book.kubebuilder.io/basics/what_is_a_resource.html) -- [What is a Controller](https://book.kubebuilder.io/basics/what_is_a_controller.html) +- [What is a Resource](https://book-v1.book.kubebuilder.io/basics/what_is_a_resource.html) +- [What is a Controller](https://book-v1.book.kubebuilder.io/basics/what_is_a_controller.html) {% endpanel %} {% panel style="warning", title="Architecture Diagram" %} diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/cluster_controller.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/cluster_controller.md index 32a248f4d6..90e77dad06 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/cluster_controller.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/cluster_controller.md @@ -24,7 +24,7 @@ it's name, namespace, labels, and annotations, etc. `ObjectMeta` contains data common to most objects. {% sample lang="go" %} -[import:'Cluster'](../../../pkg/apis/cluster/v1alpha1/cluster_types.go) +[import:'Cluster'](../../../pkg/apis/deprecated/v1alpha1/cluster_types.go) {% endmethod %} {% method %} @@ -40,7 +40,7 @@ API definitions are meant to live outside of the Cluster API, which will allow them to evolve independently of it. {% sample lang="go" %} -[import:'ClusterSpec'](../../../pkg/apis/cluster/v1alpha1/cluster_types.go) +[import:'ClusterSpec'](../../../pkg/apis/deprecated/v1alpha1/cluster_types.go) {% endmethod %} {% method %} @@ -64,7 +64,7 @@ https://github.com/kubernetes-sigs/cluster-api-provider-gcp/blob/f3145d8810a5c7f used in practice. {% sample lang="go" %} -[import:'ClusterStatus'](../../../pkg/apis/cluster/v1alpha1/cluster_types.go) +[import:'ClusterStatus'](../../../pkg/apis/deprecated/v1alpha1/cluster_types.go) {% endmethod %} {% method %} @@ -93,7 +93,7 @@ If a `Cluster` resource is deleted, the controller will call the actuator's - If the `Delete()` method returns true, remove the finalizer, we're done. - If the `Cluster` has not been deleted, call the `Reconcile()` method. -[cluster_source]: https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/cluster_types.go +[cluster_source]: https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/deprecated/v1alpha1/cluster_types.go #### cluster object reconciliation logic diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machine_controller.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machine_controller.md index b4770eeb89..999382ec01 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machine_controller.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machine_controller.md @@ -37,7 +37,7 @@ example, it's name, namespace, labels, and annotations, etc. `ObjectMeta` contains data common to most objects. {% sample lang="go" %} -[import:'Machine'](../../../pkg/apis/cluster/v1alpha1/machine_types.go) +[import:'Machine'](../../../pkg/apis/deprecated/v1alpha1/machine_types.go) {% endmethod %} {% method %} @@ -62,7 +62,7 @@ command does this [here](https://github.com/kubernetes-sigs/cluster-api/blob/a30 ``` {% sample lang="go" %} -[import:'MachineSpec'](../../../pkg/apis/cluster/v1alpha1/machine_types.go) +[import:'MachineSpec'](../../../pkg/apis/deprecated/v1alpha1/machine_types.go) {% endmethod %} {% method %} @@ -82,7 +82,7 @@ these providers a `Machine` and it's corresponding `Node` may never be within the same cluster. **TODO**: There are open issues to address this. 
{% sample lang="go" %} -[import:'MachineStatus'](../../../pkg/apis/cluster/v1alpha1/machine_types.go) +[import:'MachineStatus'](../../../pkg/apis/deprecated/v1alpha1/machine_types.go) {% endmethod %} {% method %} @@ -156,4 +156,4 @@ There are two consequences of this: [^1] One reason a `Machine` may not be deleted is if it corresponds to the node running the Machine controller. -[machine_types_source]: https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/machine_types.go +[machine_types_source]: https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/deprecated/v1alpha1/machine_types.go diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machinedeployment_controller.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machinedeployment_controller.md index 510ec51023..9176a04045 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machinedeployment_controller.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machinedeployment_controller.md @@ -4,28 +4,28 @@ ## MachineDeployment {% sample lang="go" %} -[import:'MachineDeployment'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +[import:'MachineDeployment'](../../../pkg/apis/deprecated/v1alpha1/machinedeployment_types.go) {% endmethod %} {% method %} ## MachineDeploymentSpec {% sample lang="go" %} -[import:'MachineDeploymentSpec'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +[import:'MachineDeploymentSpec'](../../../pkg/apis/deprecated/v1alpha1/machinedeployment_types.go) {% endmethod %} {% method %} ## MachineDeploymentStrategy {% sample lang="go" %} -[import:'MachineDeploymentStrategy'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +[import:'MachineDeploymentStrategy'](../../../pkg/apis/deprecated/v1alpha1/machinedeployment_types.go) {% endmethod %} {% method %} ## MachineRollingUpdateDeployment {% sample lang="go" %} -[import:'MachineRollingUpdateDeployment'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +[import:'MachineRollingUpdateDeployment'](../../../pkg/apis/deprecated/v1alpha1/machinedeployment_types.go) {% endmethod %} {% method %} @@ -33,7 +33,7 @@ ## MachineDeploymentStatus {% sample lang="go" %} -[import:'MachineDeploymentStatus'](../../../pkg/apis/cluster/v1alpha1/machinedeployment_types.go) +[import:'MachineDeploymentStatus'](../../../pkg/apis/deprecated/v1alpha1/machinedeployment_types.go) {% endmethod %} ## MachineDeployment Controller Semantics diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machineset_controller.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machineset_controller.md index 37cbeea994..b37b32d8f4 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machineset_controller.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/machineset_controller.md @@ -2,35 +2,35 @@ `MachineSet`s are currently different from `Cluster` and `Machine` resources in that they do not define an actuator interface. They are generic controllers -which implement their intent by modifying provider-specific `Cluster` and +which implement their intent by modifying provider-specific `Cluster` and `Machine` resources. 
{% method %} ## MachineSet {% sample lang="go" %} -[import:'MachineSet'](../../../pkg/apis/cluster/v1alpha1/machineset_types.go) +[import:'MachineSet'](../../../pkg/apis/deprecated/v1alpha1/machineset_types.go) {% endmethod %} {% method %} ## MachineSetSpec {% sample lang="go" %} -[import:'MachineSetSpec'](../../../pkg/apis/cluster/v1alpha1/machineset_types.go) +[import:'MachineSetSpec'](../../../pkg/apis/deprecated/v1alpha1/machineset_types.go) {% endmethod %} {% method %} ## MachineSetTemplateSpec {% sample lang="go" %} -[import:'MachineSetTemplateSpec'](../../../pkg/apis/cluster/v1alpha1/machineset_types.go) +[import:'MachineSetTemplateSpec'](../../../pkg/apis/deprecated/v1alpha1/machineset_types.go) {% endmethod %} {% method %} ## MachineSetStatus {% sample lang="go" %} -[import:'MachineSetStatus'](../../../pkg/apis/cluster/v1alpha1/machineset_types.go) +[import:'MachineSetStatus'](../../../pkg/apis/deprecated/v1alpha1/machineset_types.go) {% endmethod %} ## MachineSet Controller Semantics diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/node_controller.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/node_controller.md index d18d4994ba..8724e40455 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/node_controller.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/node_controller.md @@ -1,5 +1,8 @@ # Node Controller +> NOTE: This controller is deprecated and going to be removed in v1alpha2, +> infrastructure providers should switch to noderef controller. + The node controller has one simple job. It links or unlinks a `Machine` to the underlying core k8s `Node` object if an optional annotation `cluster.k8s.io/machine` is present on the `Node`. The decision to add the annotation is left to each infrastructure-specific provider. When the @@ -8,7 +11,7 @@ objectRef. The simplest way for Infrastructure-specific providers to add this support is to annotate nodes in their node bootup script as part of the `kubeadm join` operation (via a commandline -parameter). +parameter). ## Node Controller Semantics @@ -19,4 +22,4 @@ deletion, it unlinks the `Node` from the `Machine`. #### node reconciliation logic -![node object reconciliation logic](images/activity_node_reconciliation.svg) \ No newline at end of file +![node object reconciliation logic](images/activity_node_reconciliation.svg) diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/noderef_controller.md b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/noderef_controller.md new file mode 100644 index 0000000000..64608f6645 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/common_code/noderef_controller.md @@ -0,0 +1,8 @@ +# NodeRef Controller + +The NodeRef controller populates a `Machine`'s `Status.NodeRef` field using the Kubernetes Node +object associated with the Machine. This controller supports only machines linked to a cluster. + +Infrastructure providers can opt-in to use this controller by providing a `kubeconfig` as a Kubernetes Secret. +The secret must be stored in the namespace the Cluster lives in and named as `-kubeconfig`, +the data should only contain a single key called `value` and its data should be a valid Kubernetes `kubeconfig`. 
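To make the opt-in described in `noderef_controller.md` above concrete: the Secret is named after the Cluster (the cluster name followed by `-kubeconfig`), lives in the Cluster's namespace, and carries the kubeconfig under a single `value` key. A minimal sketch, assuming a Cluster named `my-cluster` in the `default` namespace (the embedded kubeconfig is a placeholder):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-cluster-kubeconfig   # <cluster-name>-kubeconfig, in the Cluster's namespace
  namespace: default
type: Opaque
stringData:
  value: |
    # placeholder: a complete, valid kubeconfig for the workload cluster goes here
    apiVersion: v1
    kind: Config
```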
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/create_actuators.md b/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/create_actuators.md index f2ee2babd9..e57b20e6ac 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/create_actuators.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/create_actuators.md @@ -31,7 +31,7 @@ import ( "fmt" "log" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2" client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" ) @@ -97,7 +97,7 @@ import ( "fmt" "log" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha2" client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" ) diff --git a/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/overview.md b/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/overview.md index da7b8c56a0..d69a939375 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/overview.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/book/provider_implementations/overview.md @@ -1,14 +1,13 @@ # Overview -In order to demonstrate how to develop a new Cluster API provider we will use +In order to demonstrate how to develop a new Cluster API provider we will use `kubebuilder` to create an example provider. For more information on `kubebuilder` and CRDs in general we highly recommend reading the [Kubebuilder Book][kubebuilder-book]. Much of the information here was adapted directly from it. The minimal version of -`kubebuilder` required is [`v1.0.5`][kubebuilder-1.0.5]. +`kubebuilder` required is [`v2.0.0`][kubebuilder-2.0.0]. 
## Prerequisites -- Install [`dep`][install-dep] - Install [`kubectl`][kubectl-install] - Install [`kustomize`][install-kustomize] - Install [`kubebuilder`][install-kubebuilder] @@ -53,8 +52,7 @@ chmod u+x /usr/local/bin/kustomize {% endcodegroup %} [kubebuilder-book]: https://book.kubebuilder.io/ -[install-dep]: https://github.com/golang/dep/blob/master/docs/installation.md [kubectl-install]: http://kubernetes.io/docs/user-guide/prereqs/ [install-kustomize]: https://github.com/kubernetes-sigs/kustomize/blob/master/docs/INSTALL.md [install-kubebuilder]: https://book.kubebuilder.io/getting_started/installation_and_setup.html -[kubebuilder-1.0.5]: https://github.com/kubernetes-sigs/kubebuilder/releases/tag/v1.0.5 +[kubebuilder-2.0.0]: https://github.com/kubernetes-sigs/kubebuilder/releases diff --git a/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md b/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md index 34209dc776..b38bf4c2cd 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/developer/releasing.md @@ -1,4 +1,15 @@ # Releasing + + + + +- [Output](#output) + - [Expected artifacts](#expected-artifacts) + - [Artifact locations](#artifact-locations) +- [Process](#process) + - [Permissions](#permissions) + + ## Output diff --git a/vendor/sigs.k8s.io/cluster-api/docs/developer/v1alpha1-compared-to-v1alpha2.md b/vendor/sigs.k8s.io/cluster-api/docs/developer/v1alpha1-compared-to-v1alpha2.md new file mode 100644 index 0000000000..632212f528 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/developer/v1alpha1-compared-to-v1alpha2.md @@ -0,0 +1,67 @@ +# Cluster API v1alpha1 compared to v1alpha2 + +## Providers + +### v1alpha1 + +Providers in v1alpha1 wrap behavior specific to an environment to create the infrastructure and bootstrap instances into +Kubernetes nodes. Examples of environments that have integrated with Cluster API v1alpha1 include, AWS, GCP, OpenStack, +Azure, vSphere and others. The provider vendors in Cluster API's controllers, registers its own actuators with the +Cluster API controllers and runs a single manager to complete a Cluster API management cluster. + +### v1alpha2 + +v1alpha2 introduces two new providers and changes how the Cluster API is consumed. This means that in order to have a +complete management cluster that is ready to build clusters you will need three managers. + +* Core (Cluster API) +* Bootstrap (kubeadm) +* Infrastructure (aws, gcp, azure, vsphere, etc) + +Cluster API's controllers are no longer vendored by providers. Instead, Cluster API offers its own independent +controllers that are responsible for the core types: + +* Cluster +* Machine +* MachineSet +* MachineDeployment +* [plus a few more](../../pkg/apis/cluster/v1alpha2) + +Bootstrap providers are an entirely new concept aimed at reducing the amount of kubeadm boilerplate that every provider +reimplemented in v1alpha1. The Bootstrap provider is responsible for running a controller that generates data necessary +to bootstrap an instance into a Kubernetes node (cloud-init, bash, etc). + +v1alpha1 "providers" have become Infrastructure providers. The Infrastructure provider is responsible for generating +actual infrastructure (networking, load balancers, instances, etc) for a particular environment that can consume +bootstrap data to turn the infrastructure into a Kubernetes cluster. + +## Actuators + +### v1alpha1 + +Actuators are interfaces that the Cluster API controller calls. 
A provider pulls in the generic Cluster API controller +and then registers actuators to run specific infrastructure logic (calls to the provider cloud). + +### v1alpha2 + +Actuators are not used in this version. Cluster API's controllers are no longer shared across providers and therefore +they do not need to know about the actuator interface. Instead, providers communicate to each other via Cluster API's +central objects, namely Machine and Cluster. When a user modifies a Machine with a reference, each provider will notice +that update and respond in some way. For example, the Bootstrap provider may attach BootstrapData to a BootstrapConfig +which will then be attached to the Machine object via Cluster API's controllers or the Infrastructure provider may +create a cloud instance for Kubernetes to run on. + +## `clusterctl` + +### v1alpha1 + +`clusterctl` was a command line tool packaged with v1alpha1 providers. The goal of this tool was to go from nothing to a +running management cluster in whatever environment the provider was built for. For example, Cluster-API-Provider-AWS +packaged a `clusterctl` that created a Kubernetes cluster in EC2 and installed the necessary controllers to respond to +Cluster API's APIs. + +### v1alpha2 + +`clusterctl` is likely becoming provider-agnostic meaning one clusterctl is bundled with Cluster API and can be reused +across providers. Work here is still being figured out but providers will not be packaging their own `clusterctl` +anymore. diff --git a/vendor/sigs.k8s.io/cluster-api/docs/examples/cluster/cluster.yaml b/vendor/sigs.k8s.io/cluster-api/docs/examples/cluster/cluster.yaml index 9640cbfbe0..632c458426 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/examples/cluster/cluster.yaml +++ b/vendor/sigs.k8s.io/cluster-api/docs/examples/cluster/cluster.yaml @@ -1,6 +1,6 @@ note: Cluster Example sample: | - apiVersion: cluster.k8s.io/v1alpha1 + apiVersion: cluster.k8s.io/v1alpha2 kind: Cluster metadata: name: cluster-example diff --git a/vendor/sigs.k8s.io/cluster-api/docs/examples/machine/machine.yaml b/vendor/sigs.k8s.io/cluster-api/docs/examples/machine/machine.yaml index 323c2c9383..ea201a8223 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/examples/machine/machine.yaml +++ b/vendor/sigs.k8s.io/cluster-api/docs/examples/machine/machine.yaml @@ -1,6 +1,6 @@ note: Machine Example sample: | - apiVersion: cluster.k8s.io/v1alpha1 + apiVersion: cluster.k8s.io/v1alpha2 kind: Machine metadata: name: machine-example diff --git a/vendor/sigs.k8s.io/cluster-api/docs/examples/machinedeployment/machinedeployment.yaml b/vendor/sigs.k8s.io/cluster-api/docs/examples/machinedeployment/machinedeployment.yaml index c092b6acbe..da604ebb16 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/examples/machinedeployment/machinedeployment.yaml +++ b/vendor/sigs.k8s.io/cluster-api/docs/examples/machinedeployment/machinedeployment.yaml @@ -1,6 +1,6 @@ note: MachineDeployment Example sample: | - apiVersion: cluster.k8s.io/v1alpha1 + apiVersion: cluster.k8s.io/v1alpha2 kind: MachineDeployment metadata: name: machinedeployment-example diff --git a/vendor/sigs.k8s.io/cluster-api/docs/examples/machineset/machineset.yaml b/vendor/sigs.k8s.io/cluster-api/docs/examples/machineset/machineset.yaml index 47353ebb14..fa67250382 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/examples/machineset/machineset.yaml +++ b/vendor/sigs.k8s.io/cluster-api/docs/examples/machineset/machineset.yaml @@ -1,6 +1,6 @@ note: MachineSet Example sample: | - apiVersion: cluster.k8s.io/v1alpha1 + apiVersion: 
cluster.k8s.io/v1alpha2 kind: MachineSet metadata: name: machineset-example diff --git a/vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md b/vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md new file mode 100644 index 0000000000..5b87014529 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/how-to-use-clusterctl.md @@ -0,0 +1,94 @@ +# Using `clusterctl` to create a cluster from scratch + + + + +- [What is `clusterctl`?](#what-is-clusterctl) +- [Creating a cluster](#creating-a-cluster) + - [Creating a workload cluster using the management cluster](#creating-a-workload-cluster-using-the-management-cluster) + + + +This document provides an overview of how `clusterctl` works and explains how one can use `clusterctl` +to create a Kubernetes cluster from scratch. + +## What is `clusterctl`? + +`clusterctl` is a CLI tool to create a Kubernetes cluster. `clusterctl` is provided by the [provider implementations](https://github.com/Kubernetes-sigs/cluster-api#provider-implementations). +It uses Cluster API provider implementations to provision resources needed by the Kubernetes cluster. + +## Creating a cluster + +`clusterctl` needs 4 YAML files to start with: `provider-components.yaml`, `cluster.yaml`, `machines.yaml` , +`addons.yaml`. + +* `provider-components.yaml` contains the *Custom Resource Definitions ([CRDs](https://Kubernetes.io/docs/concepts/extend-Kubernetes/api-extension/custom-resources/))* +of all the resources that are managed by Cluster API. Some examples of these resources +are: `Cluster`, `Machine`, `MachineSet`, etc. For more details about Cluster API resources +click [here](https://cluster-api.sigs.k8s.io/common_code/architecture.html#cluster-api-resources). +* `cluster.yaml` defines an object of the resource type `Cluster`. +* `machines.yaml` defines an object of the resource type `Machine`. Generally creates the machine +that becomes the control-plane. +* `addons.yaml` contains the addons for the provider. + +Many providers implementations come with helpful scripts to generate these YAMLS. Provider implementation +can be found [here](https://github.com/Kubernetes-sigs/cluster-api#provider-implementations). + +`clusterctl` also comes with additional features. For example, `clusterctl` can also take in an optional +`bootstrap-only-components.yaml` to provide resources to the bootstrap cluster without also providing them +to the target cluster post-pivot. + +For more details about all the supported options run: + +``` +clusterctl create cluster --help +``` + +After generating the YAML files run the following command: + +``` +clusterctl create cluster --provider --bootstrap-type -c cluster.yaml -m machines.yaml -p provider-components.yaml --addon-components addons.yaml +``` + +Example usage: + +``` +# VMware vSphere +clusterctl create cluster --provider vsphere --bootstrap-type kind -c cluster.yaml -m machines.yaml -p provider-components.yaml --addon-components addons.yaml + +# Amazon AWS +clusterctl create cluster --provider aws --bootstrap-type kind -c cluster.yaml -m machines.yaml -p provider-components.yaml --addon-components addons.yaml +``` + +**What happens when we run the command?** +After running the command first it creates a local cluster. If `kind` was passed as the `--bootstrap-type` +it creates a local [kind](https://kind.sigs.k8s.io/) cluster. This cluster is generally referred to as the *bootstrap cluster*. +On this kind Kubernetes cluster the `provider-components.yaml` file is applied. This step loads the CRDs into +the cluster. 
It also creates 2 [StatefulSet](https://Kubernetes.io/docs/concepts/workloads/controllers/statefulset/) +pods that run the cluster api controller and the provider specific controller. These pods register the custom +controllers that manage the newly defined resources (`Cluster`, `Machine`, `MachineSet`, etc). + +Next, `clusterctl` applies the `cluster.yaml` and `machines.yaml` to the local kind Kubernetes cluster. This +step creates a Kubernetes cluster with only a control-plane(as defined in `machines.yaml`) on the specified +provider. This newly created cluster is generally referred to as the *management cluster* or *pivot cluster* +or *initial target cluster*. The management cluster is responsible for creating and maintaining the work-load cluster. + +Lastly, `clusterctl` moves all the CRDs and the custom controllers from the bootstrap cluster to the +management cluster and deletes the locally created bootstrap cluster. This step is referred to as the *pivot*. + +### Creating a workload cluster using the management cluster + +The *workload cluster* also sometimes referred to as the *target cluster* is the Kubernetes cluster on to which +the final application is deployed. The target cluster is responsible for handling the workload of the application, +not the management cluster. + +As the management cluster is up we can create a workload cluster by simply applying the appropriate +`cluster.yaml`, `machines.yaml` and `machineset.yaml` on the management cluster. This will create the VMs(Nodes) +as defined in these YAMLs. Following this, a bootstrap mechanism is used to create a Kubernetes cluster on these VMs. +While any of the several bootstrapping mechanisms can be used `kubeadm` is the popular option. + +**NOTE:** Workload clusters do not have any addons applied. Nodes in your workload clusters will be in the `NotReady` +state until you apply addons for CNI plugin. + +Once the target cluster is up the user can create the `Deployments`, `Services`, etc that handle the workload +of the application. diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/20181121-machine-api.md b/vendor/sigs.k8s.io/cluster-api/docs/proposals/20181121-machine-api.md index 971973a871..481e723977 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/proposals/20181121-machine-api.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/20181121-machine-api.md @@ -1,6 +1,20 @@ Minimalistic Machines API ========================= - + + + + +- [Capabilities](#capabilities) +- [Proposal](#proposal) +- [In-place vs. Replace](#in-place-vs-replace) +- [Omitted Capabilities](#omitted-capabilities) + - [A provider-agnostic mechanism to request new nodes](#a-provider-agnostic-mechanism-to-request-new-nodes) + - [Dynamic API endpoint](#dynamic-api-endpoint) +- [Conditions](#conditions) +- [Types](#types) + + + This proposal is for a minimalistic start to a new Machines API, as part of the overall Cluster API project. It is intended to live outside of core Kubernetes and add optional machine management features to Kubernetes clusters. @@ -129,4 +143,4 @@ revisit the specifics when new patterns start to emerge in core. ## Types -Please see the full types [here](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/cluster/v1alpha1/machine_types.go). +Please see the full types [here](https://github.com/kubernetes-sigs/cluster-api/blob/master/pkg/apis/deprecated/v1alpha1/machine_types.go). 
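The v1alpha1-compared-to-v1alpha2 notes earlier in this patch describe providers coordinating through references on the core Cluster and Machine objects rather than through registered actuators. As a hedged illustration only (the resource names and the `KubeadmConfig`/`OpenStackMachine` kinds are placeholders for whichever bootstrap and infrastructure providers are in use), a v1alpha2 Machine carrying such references might look like:

```yaml
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Machine
metadata:
  name: controlplane-0
  labels:
    cluster.x-k8s.io/cluster-name: my-cluster    # links the Machine to its Cluster
spec:
  version: v1.15.3                               # illustrative Kubernetes version
  bootstrap:
    configRef:                                   # reconciled by the bootstrap provider
      apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
      kind: KubeadmConfig
      name: controlplane-0-config
  infrastructureRef:                             # reconciled by the infrastructure provider
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: OpenStackMachine
    name: controlplane-0
```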
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/20190610-machine-states-preboot-bootstrapping.md b/vendor/sigs.k8s.io/cluster-api/docs/proposals/20190610-machine-states-preboot-bootstrapping.md new file mode 100644 index 0000000000..8e7c6b160f --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/20190610-machine-states-preboot-bootstrapping.md @@ -0,0 +1,502 @@ +--- +title: Machine States & Preboot Bootstrapping + + + + +- [Machine States & Preboot Bootstrapping](#machine-states--preboot-bootstrapping) + - [Table of Contents](#table-of-contents) + - [Glossary](#glossary) + - [Summary](#summary) + - [Motivation](#motivation) + - [Goals](#goals) + - [Non-Goals/Future Work](#non-goalsfuture-work) + - [Proposal](#proposal) + - [Data model changes](#data-model-changes) + - [States and transitions](#states-and-transitions) + - [Pending](#pending) + - [Transition Conditions](#transition-conditions) + - [Expectations](#expectations) + - [Provisioning](#provisioning) + - [Transition Conditions](#transition-conditions-1) + - [Expectations](#expectations-1) + - [Provisioned](#provisioned) + - [Transition Conditions](#transition-conditions-2) + - [Expectations](#expectations-2) + - [Running](#running) + - [Transition Conditions](#transition-conditions-3) + - [Expectations](#expectations-3) + - [Deleting](#deleting) + - [Transition Conditions](#transition-conditions-4) + - [Expectations](#expectations-4) + - [Deleted](#deleted) + - [Transition Conditions](#transition-conditions-5) + - [Expectations](#expectations-5) + - [Failed](#failed) + - [Transition Conditions](#transition-conditions-6) + - [Expectations](#expectations-6) + - [Sequence diagram: User creates a machine with Kubeadm bootstrapper.](#sequence-diagram-user-creates-a-machine-with-kubeadm-bootstrapper) + - [User Stories](#user-stories) + - [As a Kubernetes operator, I’d like to provide custom bootstrap data without the use of a Kubernetes controller.](#as-a-kubernetes-operator-id-like-to-provide-custom-bootstrap-data-without-the-use-of-a-kubernetes-controller) + - [As a Kubernetes operator, I’d like to monitor the progress of fulfilling a Machine and understand what errors, if any, have been reported by the controllers involved.](#as-a-kubernetes-operator-id-like-to-monitor-the-progress-of-fulfilling-a-machine-and-understand-what-errors-if-any-have-been-reported-by-the-controllers-involved) + - [As an infrastructure provider author, I would like to build the fewest number of components possible to support the full cluster-api.](#as-an-infrastructure-provider-author-i-would-like-to-build-the-fewest-number-of-components-possible-to-support-the-full-cluster-api) + - [As an infrastructure provider author, I would like to take advantage of the kubernetes API to provide validation for provider-specific data needed to provision a machine.](#as-an-infrastructure-provider-author-i-would-like-to-take-advantage-of-the-kubernetes-api-to-provide-validation-for-provider-specific-data-needed-to-provision-a-machine) + - [As an infrastructure provider author, I would like to build a controller to manage provisioning machines using tools of my own choosing.](#as-an-infrastructure-provider-author-i-would-like-to-build-a-controller-to-manage-provisioning-machines-using-tools-of-my-own-choosing) + - [As an infrastructure provider author, I would like to build a controller to manage provisioning machines without being restricted to a CRUD 
API.](#as-an-infrastructure-provider-author-i-would-like-to-build-a-controller-to-manage-provisioning-machines-without-being-restricted-to-a-crud-api) + - [As an infrastructure provider consumer, I would like to have validation for the provider-specific data I need to give the system to have it provision a machine.](#as-an-infrastructure-provider-consumer-i-would-like-to-have-validation-for-the-provider-specific-data-i-need-to-give-the-system-to-have-it-provision-a-machine) + - [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints) + - [Machine Controller Role](#machine-controller-role) + - [Machine Controller dynamic watchers](#machine-controller-dynamic-watchers) + - [Object References, Templates, MachineSets and MachineDeployments](#object-references-templates-machinesets-and-machinedeployments) + - [Controllers and the single responsibility approach](#controllers-and-the-single-responsibility-approach) + - [Remote references and accessing a workload cluster](#remote-references-and-accessing-a-workload-cluster) + - [The “Phase” field and its role](#the-phase-field-and-its-role) + - [Showing a status summary to users](#showing-a-status-summary-to-users) + - [Risks and Mitigations](#risks-and-mitigations) + - [State transitions are inflexible](#state-transitions-are-inflexible) + - [Machine Controller can access any machine or cluster in any namespace](#machine-controller-can-access-any-machine-or-cluster-in-any-namespace) + - [Certificates and tokens are exposed in plaintext](#certificates-and-tokens-are-exposed-in-plaintext) + - [Bootstrap data cannot be merged](#bootstrap-data-cannot-be-merged) + - [MachineClass is deprecated and will be revisited later](#machineclass-is-deprecated-and-will-be-revisited-later) + - [Design Details](#design-details) + - [Test Plan](#test-plan) + - [Graduation Criteria](#graduation-criteria) + - [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + - [Version Skew Strategy](#version-skew-strategy) + - [Implementation History](#implementation-history) + - [Drawbacks](#drawbacks) + - [Alternatives](#alternatives) + - [Object References, Templates, MachineSets and MachineDeployments](#object-references-templates-machinesets-and-machinedeployments-1) + + + +authors: + - "@ncdc" + - "@vincepri" +reviewers: + - "@davidewatson" + - "@detiber" + - "@justinsb" + - "@timothysc" + - "@vincepri" +creation-date: 2019-06-10 +last-updated: 2019-06-28 +status: provisional +--- + +# Machine States & Preboot Bootstrapping + +## Table of Contents + +* [Machine States & Preboot Bootstrapping](#machine-states--preboot-bootstrapping) + * [Table of Contents](#table-of-contents) + * [Glossary](#glossary) + * [Summary](#summary) + * [Motivation](#motivation) + * [Goals](#goals) + * [Non\-Goals](#non-goals) + * [Future Work](#future-work) + * [Proposal](#proposal) + * [Data model changes](#data-model-changes) + * [States and transitions](#states-and-transitions) + * [Pending](#pending) + * [Transition Conditions](#transition-conditions) + * [Expectations](#expectations) + * [Provisioning](#provisioning) + * [Transition Conditions](#transition-conditions-1) + * [Expectations](#expectations-1) + * [Provisioned](#provisioned) + * [Transition Conditions](#transition-conditions-2) + * [Expectations](#expectations-2) + * [Running](#running) + * [Transition Conditions](#transition-conditions-3) + * [Expectations](#expectations-3) + * [Deleting](#deleting) + * [Transition Conditions](#transition-conditions-4) + * [Expectations](#expectations-4) + * 
[Deleted](#deleted) + * [Transition Conditions](#transition-conditions-5) + * [Expectations](#expectations-5) + * [Failed](#failed) + * [Transition Conditions](#transition-conditions-6) + * [Expectations](#expectations-6) + * [Sequence diagram: User creates a machine with Kubeadm bootstrapper\.](#sequence-diagram-user-creates-a-machine-with-kubeadm-bootstrapper) + * [User Stories](#user-stories) + * [As a Kubernetes operator, I’d like to provide custom bootstrap data without the use of a Kubernetes controller\.](#as-a-kubernetes-operator-id-like-to-provide-custom-bootstrap-data-without-the-use-of-a-kubernetes-controller) + * [As a Kubernetes operator, I’d like to monitor the progress of fulfilling a Machine and understand what errors, if any, have been reported by the controllers involved\.](#as-a-kubernetes-operator-id-like-to-monitor-the-progress-of-fulfilling-a-machine-and-understand-what-errors-if-any-have-been-reported-by-the-controllers-involved) + * [As an infrastructure provider author, I would like to build the fewest number of components possible to support the full cluster\-api\.](#as-an-infrastructure-provider-author-i-would-like-to-build-the-fewest-number-of-components-possible-to-support-the-full-cluster-api) + * [As an infrastructure provider author, I would like to take advantage of the kubernetes API to provide validation for provider\-specific data needed to provision a machine\.](#as-an-infrastructure-provider-author-i-would-like-to-take-advantage-of-the-kubernetes-api-to-provide-validation-for-provider-specific-data-needed-to-provision-a-machine) + * [As an infrastructure provider consumer, I would like to have validation for the provider\-specific data I need to give the system to have it provision a machine\.](#as-an-infrastructure-provider-consumer-i-would-like-to-have-validation-for-the-provider-specific-data-i-need-to-give-the-system-to-have-it-provision-a-machine) + * [As an infrastructure provider author, I would like to build a controller to manage provisioning machines using tools of my own choosing\.](#as-an-infrastructure--provider-author-i-would-like-to-build-a-controller-to-manage-provisioning-machines-using-tools-of-my-own-choosing) + * [As an infrastructure provider author, I would like to build a controller to manage provisioning machines without being restricted to a CRUD API\.](#as-an-infrastructure-provider-author-i-would-like-to-build-a-controller-to-manage-provisioning-machines-without-being-restricted-to-a-crud-api) + * [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints) + * [Machine Controller Role](#machine-controller-role) + * [Machine Controller dynamic watchers](#machine-controller-dynamic-watchers) + * [Object References, Templates, MachineSets and MachineDeployments](#object-references-templates-machinesets-and-machinedeployments) + * [Controllers and the single responsibility approach](#controllers-and-the-single-responsibility-approach) + * [Remote references and accessing a workload cluster](#remote-references-and-accessing-a-workload-cluster) + * [The “Phase” field and its role](#the-phase-field-and-its-role) + * [Showing a status summary to users](#showing-a-status-summary-to-users) + * [Risks and Mitigations](#risks-and-mitigations) + * [State transitions are inflexible](#state-transitions-are-inflexible) + * [Machine Controller can access any machine or cluster in any namespace](#machine-controller-can-access-any-machine-or-cluster-in-any-namespace) + * [Certificates and tokens are exposed in 
plaintext](#certificates-and-tokens-are-exposed-in-plaintext) + * [Bootstrap data cannot be merged](#bootstrap-data-cannot-be-merged) + * [MachineClass is deprecated and will be revisited later](#machineclass-is-deprecated-and-will-be-revisited-later) + * [Design Details](#design-details) + * [Test Plan](#test-plan) + * [Graduation Criteria](#graduation-criteria) + * [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + * [Version Skew Strategy](#version-skew-strategy) + * [Implementation History](#implementation-history) + * [Drawbacks](#drawbacks) + * [Alternatives](#alternatives) + * [Object References, Templates, MachineSets and MachineDeployments](#object-references-templates-machinesets-and-machinedeployments-1) + +## Glossary + +- **[Cluster API](../glossary.md#cluster-api)**: Unless otherwise specified, this refers to the project as a whole. +- **Cluster API Manager**: The controller-runtime's Manager that runs controllers. +- **[Machine](../glossary.md#machine)**: The Kubernetes Custom Resource Definition offered by Cluster API. +- **[Server/Instance/Host](../glossary.md#server)**: The infrastructure that backs a Machine. +- **Bootstrapping**: The process of turning a server into a Kubernetes node. + +## Summary + +This proposal outlines splitting up the Machine data model into two new providers/controllers along with the generic Machine controller. The two new providers are the Infrastructure provider and the Bootstrap provider. The infrastructure provider handles provisioning infrastructure (cloud, bare metal, virtual) and the bootstrap provider handles turning a server into a Kubernetes node. This change will improve separation of concerns for the Machine controller and gives more degrees of freedom to the user. + +In Cluster API v1alpha1, users can create Machine custom resources. When a Machine resource is created, Cluster API components react by provisioning infrastructure (typically a physical or virtual machine) and then bootstrapping Kubernetes on the provisioned server, making it a Kubernetes Node. Cluster API’s current architecture mandates that a single “provider” handle both the infrastructure provisioning and the Kubernetes bootstrapping. + +## Motivation +- A singular provider that combines infrastructure provisioning and Kubernetes bootstrapping is too restrictive. Many current v1alpha1 providers target single cloud providers. If a user wants to install a specific Kubernetes distribution (Kubernetes, OpenShift, Rancher, etc.), it is left to the provider to add support for it. In practical terms, we are not able to guarantee that a user can install the same distribution regardless of infrastructure environment. +- The Machine custom resource contains fields for provider-specific configuration and status. These are represented as opaque [RawExtensions](https://godoc.org/k8s.io/apimachinery/pkg/runtime#RawExtension), meaning their contents are not directly processed as part of the Machine’s validation. +- The strict relationship between the Machine controller with provider-specific Actuator doesn't allow reconciling machines with providerSpec of different cloud providers in the same cluster (i.e. hybrid cluster). Separation of provider-specific bits from the Machine controller increases flexibility of the cluster api plane and moves the responsibility for their interpretation and reconciliation into dedicated controllers. +- Both users and developers are confused by the code organization split. 
The Cluster API repository currently offers controllers for Cluster, Machine, MachineSet, MachineDeployment, Node. Some of these controllers are meant to be vendored and run as part of a provider's Manager; while some others are generic (MachineDeployment, MachineSet, Node) and meant to be run as part of the Cluster API Manager. As outlined in this proposal, we'd like to streamline the relationship between where the code lives and where it runs. In particular, we'd like to make the Machine type and its controller generic, which will allow for it to be part of the Cluster API Manager. + +### Goals +1. To use Kubernetes Controllers to manage the lifecycle of Machines. +1. To make bootstrap implementations reusable across any infrastructure provider. +1. To support pre-boot (e.g. cloud-init, bash scripts, or similar) bootstrapping. +1. To support bootstrapping different Kubernetes distributions on the same infrastructure provider. +1. To validate provider-specific content as early as possible (create, update). + +### Non-Goals/Future Work +1. To modify the Cluster object. +1. To fully implement machine state lifecycle hooks. This must be part of a future proposal that builds on these proposed changes. +1. To setup OS configuration, install software or any other features related to image building. +1. To support post-boot configuration of Machine. +1. To revisit MachineClass role and functionality. +1. To customize the image beyond the settings required to run kubelet, that is an implementation constraint of the bootstrap provider. + +## Proposal +In this section we outline the proposed API changes; describe states, transitions, and sequence diagrams for the most important scenarios; and provide a kubeadm-based example. + +### Data model changes +The Machine Spec and Status fields undergo some breaking changes which remove or add new fields. We introduce a Bootstrap Controller which encapsulates the integration point between a Machine and its path to become a Kubernetes Node. + +```go +type MachineSpec struct +``` +- **To remove** + - **ProviderSpec [optional]** _Superseded by InfrastructureRef_ + - Type: `*runtime.RawExtension` + - Description: ProviderSpec details Provider-specific configuration to use during node creation. + - **Versions [optional]** _Superseded by Version_ + - Type: `MachineVersionInfo` + - Description: Versions of key software to use. This field is optional at cluster creation time, and omitting the field indicates that the cluster installation tool should select defaults for the user. These defaults may differ based on the cluster installer, but the tool should populate the values it uses when persisting Machine objects. A Machine spec missing this field at runtime is invalid. +- **To add** + - **Bootstrap** + - Type: `Bootstrap` + - Description: Bootstrap is a reference to a local struct which encapsulates fields to configure the Machine’s bootstrapping mechanism. + - **InfrastructureRef [required]** + - Type: `corev1.ObjectReference` + - Description: InfrastructureRef is a required reference to a Custom Resource Definition offered by an infrastructure provider. + - **Version [optional]** + - Type: `string` + - Description: Version defines the desired Kubernetes version. This field is meant to be optionally used by bootstrap providers. + +```go +type MachineStatus struct +``` + +- **To remove** + - **ProviderStatus [optional]** + - Type: `*runtime.RawExtension` + - Description: ProviderStatus details a Provider-specific status. 
It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.
+ - **Versions [optional]** _Superseded by Version_
+ - Type: `*MachineVersionInfo`
+ - Description: Versions specifies the current versions of software on the corresponding Node (if it exists). This is provided for a few reasons [...].
+- **To add**
+ - **Version [optional]**
+ - Type: `*string`
+ - Description: Version specifies the current version of Kubernetes running on the corresponding Node. This is meant to be a means of bubbling up status from the Node to the Machine. It is entirely optional, but useful for end-user UX if it’s present.
+ - **BootstrapReady [optional]**
+ - Type: `bool`
+ - Description: True when the bootstrap provider status is ready.
+ - **InfrastructureReady [optional]**
+ - Type: `bool`
+ - Description: True when the infrastructure provider status is ready.
+
+```go
+type Bootstrap struct
+```
+- **To add**
+ - **ConfigRef [optional]**
+ - Type: `*corev1.ObjectReference`
+ - Description: ConfigRef is a reference to a bootstrap provider-specific resource that holds configuration details. The reference is optional to allow users/operators to specify Bootstrap.Data without the need of a controller.
+ - **Data [optional]**
+ - Type: `*string`
+ - Description: Data contains the bootstrap data, such as cloud-init scripts. If nil, the Machine should remain in the Pending state.
+
+## States and transitions
+The Cluster API Machine Controller is responsible for managing the lifecycle of a Machine, including its state transitions. This is the only controller that should be writing updates to Machines.
+
+---
+### Pending
+```go
+// MachinePhasePending is the first state a Machine is assigned by
+// Cluster API Machine controller after being created.
+MachinePhasePending = MachinePhaseType("pending")
+```
+
+#### Transition Conditions
+- `Machine.Status.Phase` is empty string
+
+#### Expectations
+- When `Machine.Spec.Bootstrap.Data` is:
+ - `<nil>`, expect the field to be set by an external controller.
+ - `""` (empty string), expect the bootstrap step to be ignored.
+ - `"..."` (populated by user or from the bootstrap provider), expect the contents to be used by a bootstrap (pre-Running transition) or infrastructure (Provisioning state) provider.
+
+---
+### Provisioning
+```go
+// MachinePhaseProvisioning is the state when the
+// Machine infrastructure is being created.
+MachinePhaseProvisioning = MachinePhaseType("provisioning")
+```
+
+#### Transition Conditions
+- `Machine.Spec.Bootstrap.ConfigRef`->`Status.Ready` is true
+- `Machine.Spec.Bootstrap.Data` is not `<nil>`
+
+#### Expectations
+- Machine’s infrastructure to be in the process of being provisioned.
+
+---
+### Provisioned
+```go
+// MachinePhaseProvisioned is the state when its
+// infrastructure has been created and configured.
+MachinePhaseProvisioned = MachinePhaseType("provisioned")
+```
+
+#### Transition Conditions
+- `Machine.Spec.InfrastructureRef`->`Status.Ready` is true
+- `Machine.Spec.ProviderID` is synced from `Machine.Spec.InfrastructureRef` -> `Spec.ProviderID`
+- (optional) `Machine.Status.Addresses` is synced from `Machine.Spec.InfrastructureRef` -> `Status.Addresses`
+
+#### Expectations
+- Machine’s infrastructure has been created and the compute resource is available to be configured.
+
+---
+### Running
+```go
+// MachinePhaseRunning is the Machine state when it has
+// become a Kubernetes Node in a Ready state.
+MachinePhaseRunning = MachinePhaseType("running")
+```
+
+#### Transition Conditions
+- A Kubernetes Node is found with the same `Machine.Spec.ProviderID` and its state is “Ready”
+
+#### Expectations
+- Machine controller should set `Machine.Status.NodeRef`.
+
+---
+### Deleting
+```go
+// MachinePhaseDeleting is the Machine state when a delete
+// request has been sent to the API Server,
+// but its infrastructure has not yet been fully deleted.
+MachinePhaseDeleting = MachinePhaseType("deleting")
+```
+
+#### Transition Conditions
+- `Machine.ObjectMeta.DeletionTimestamp` is not `<nil>`
+
+#### Expectations
+- The associated Node should be drained and cordoned.
+- Machine’s related resources should be deleted first using cascading deletion.
+
+---
+### Deleted
+```go
+// MachinePhaseDeleted is the Machine state when the object
+// and the related infrastructure is deleted and
+// ready to be garbage collected by the API Server.
+MachinePhaseDeleted = MachinePhaseType("deleted")
+```
+
+#### Transition Conditions
+- `Machine.ObjectMeta.DeletionTimestamp` is not `<nil>`
+- (optional) `Machine.Bootstrap.ConfigRef` -> `ObjectMeta.DeletionTimestamp` is not `<nil>`
+- `Machine.Spec.InfrastructureRef`-> `ObjectMeta.DeletionTimestamp` is not `<nil>`
+
+#### Expectations
+- Machine controller should remove the finalizer.
+
+---
+### Failed
+```go
+// MachinePhaseFailed is the Machine state when the system
+// might require user intervention.
+MachinePhaseFailed = MachinePhaseType("failed")
+```
+
+#### Transition Conditions
+- `Machine.ErrorReason` and/or `Machine.ErrorMessage` is populated.
+
+#### Expectations
+- User intervention.
+
+![Figure 1](./images/machine-states-preboot/Figure1.png)
+
+---
+### Sequence diagram: User creates a machine with Kubeadm bootstrapper.
+In this scenario, we go through each step from “kubectl apply” to seeing the Node in “Running” state. The user has chosen to create a Machine with the following: no custom user data, Machine.Bootstrap is a Kubeadm bootstrap provider, and Machine.InfrastructureRef is an AWS infrastructure provider.
+
+![Figure 2](./images/machine-states-preboot/Figure2.png)
+
+Figure 2 is a simplified diagram of the configuration of a Machine using a Kubeadm bootstrap provider. Machine.Spec.Bootstrap.ConfigRef points to a bootstrap provider-specific custom resource; in this example, a KubeadmBootstrapConfig called controlplane-0.
+
+![Figure 3](./images/machine-states-preboot/Figure3.png)
+
+In this diagram, we have two main entities: API Server and Cluster API Machine Controller. When a Machine is created, the Kubernetes API Server validates it, persists it, and emits a watch event. The Cluster API Machine Controller receives the watch event and reconciles the Machine.
+
+The reconciliation loop starts by checking the Machine’s Bootstrap fields. For the purpose of this example, we expect the Machine.Spec.Bootstrap.Data field to be nil and for the ConfigRef’s associated resource to have a non-Ready status. The Machine Controller sets Machine.Status.Phase to “Pending” and sends the update to the API Server.
+
+The Machine Controller also establishes a watch for the bootstrap- and infrastructure-specific resources (KubeadmBootstrapConfig, AWSInfrastructureConfig). In addition, the controller sets an OwnerReference on KubeadmBootstrapConfig and AWSInfrastructureConfig.
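The dynamic-watch mechanism referred to here is described further in the "Machine Controller dynamic watchers" section below. As a rough, non-normative sketch (not part of the proposal), wiring such a watch with the controller-runtime and client-go packages mentioned there could look like the following; the `watchExternalResource` helper, the import paths, and the resync interval are illustrative assumptions based on the controller-runtime APIs of that era (v0.2.x).

```go
package controllers

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

// watchExternalResource (hypothetical helper) starts a dynamic watch on a
// bootstrap or infrastructure provider resource, identified by its
// GroupVersionResource, and enqueues the owning Machine whenever that
// resource changes.
func watchExternalResource(c controller.Controller, dc dynamic.Interface, gvr schema.GroupVersionResource) error {
	// A dynamic informer lets the Machine controller watch provider CRDs it
	// does not know about at compile time. Note that the informer factory
	// still has to be started separately for events to flow.
	factory := dynamicinformer.NewDynamicSharedInformerFactory(dc, 10*time.Minute)
	informer := factory.ForResource(gvr).Informer()

	// EnqueueRequestForOwner maps events on the provider-specific object back
	// to the Machine named in the OwnerReference set by the Machine controller.
	return c.Watch(
		&source.Informer{Informer: informer},
		&handler.EnqueueRequestForOwner{OwnerType: &clusterv1.Machine{}},
	)
}
```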
+ +![Figure 4](./images/machine-states-preboot/Figure4.png) + +After the Machine Controller updates the Machine, it must wait for the machine to be “provisionable” - in this case, the Machine must have its Spec.Bootstrap.Data specified. Any value besides nil is acceptable. Some environments may not require any bootstrap data, in which case this would be designated by the empty string. Other environments that do require bootstrap data would use a non-empty string. + +In this example, we have a Kubeadm Bootstrap Provider that runs a controller watching for KubeadmBootstrapConfig and Machine WatchEvents. Upon receiving a Machine event, a reconcile request is enqueued for the associated KubeadmBootstrapConfig. Next, it retrieves the Machine, Cluster, and generates a cloud-init based bootstrap configuration which is then stored in KubeadmBootstrapConfig.Status.BootstrapData. It also sets KubeadmBootstrapConfig.Status.Ready to `true`. A non-nil value in KubeadmBootstrapConfig.Status.BootstrapData is indicative to the Cluster API Machine Controller that the bootstrap provider has completed its work. + +![Figure 5](./images/machine-states-preboot/Figure5.png) + +The Cluster API Machine Controller receives an event for the updated KubeadmBootstrapConfig resource and enqueues a reconciliation request for the associated Machine. The Machine Controller sees the populated KubeadmBootstrapConfig.Status.BootstrapData field and copies it to Machine.Spec.Bootstrap.Data. The Machine Controller also updates Machine.Status.Phase to Provisioning and sends the updated Machine to the API Server. + +![Figure 6](./images/machine-states-preboot/Figure6.png) + +With this latest Machine update, the API Server emits another watch event. A machine infrastructure provider controller watches for updates to Machine objects to determine when it can reconcile its own objects and start provisioning. For this example, we’ll use an AWS infrastructure provider that bootstraps using cloud-init. As shown in Figure 6, it watches for AWSInfrastructureConfig and Machine WatchEvents. Once it receives the event on the Machine, it enqueues a reconciliation request for the associated AWSInfrastructureConfig (via Machine.InfrastructureRef). When the controller sees the Machine’s non-nil bootstrap data, it asks AWS to create an EC2 instance, using the Machine’s bootstrap data as the cloud-init data for the virtual machine. After the EC2 instance is up and running, the AWS controller sets AWSInfrastructureConfig.Status.Ready to `true`, populates the AWSInfrastructureConfig.Status.Addresses field, and sends the updated AWSInfrastructureConfig to the API Server. + +![Figure 7](./images/machine-states-preboot/Figure7.png) + +The Cluster API Controller manager receives an event for the updated AWSInfrastructureConfig resource and enqueues a reconciliation request for the associated Machine. The Machine Controller understands that the infrastructure is ready by checking that the AWSInfrastructureConfig.Status.Addresses field has been populated and proceeds to wait for a Kubernetes Node matching the Machine.ProviderID in “Ready” state. After a Node is found, the Machine Controller sets Machine.Status.NodeRef, Machine.Status.Phase to “Ready” and updates the Machine object. + +![Figure 8](./images/machine-states-preboot/Figure8.png) + +The Machine has now become a Kubernetes Node and ready to be used. + +### User Stories + +#### As a Kubernetes operator, I’d like to provide custom bootstrap data without the use of a Kubernetes controller. 
+ +#### As a Kubernetes operator, I’d like to monitor the progress of fulfilling a Machine and understand what errors, if any, have been reported by the controllers involved. + +#### As an infrastructure provider author, I would like to build the fewest number of components possible to support the full cluster-api. + +#### As an infrastructure provider author, I would like to take advantage of the kubernetes API to provide validation for provider-specific data needed to provision a machine. + +#### As an infrastructure provider author, I would like to build a controller to manage provisioning machines using tools of my own choosing. + +#### As an infrastructure provider author, I would like to build a controller to manage provisioning machines without being restricted to a CRUD API. + +#### As an infrastructure provider consumer, I would like to have validation for the provider-specific data I need to give the system to have it provision a machine. + +### Implementation Details/Notes/Constraints + +#### Machine Controller Role +The Machine Controller lives within Cluster API manager alongside MachineSet and MachineDeployment controllers. This controller should be the only controller having write permissions to Machine objects. The controller responsibility focuses on state transitions and operations including but not limited to: +- Manage Machines finalizers. +- Set Machine OwnerReferences (e.g. to Cluster, MachineSet). +- Setting Machine.Status.Phase based on the transition conditions described above. +- Pull data from provider custom resources into the right places (e.g. Machine.Spec.Bootstrap.Data from Machine.Spec.Bootstrap.ConfigRef -> Status.BootstrapData). +- Update Status fields from provider custom resources. + +#### Machine Controller dynamic watchers +The Machine Controller needs to watch for updates to bootstrap and infrastructure provider specific resources so it can copy bootstrap data/status and infrastructure provider status information to the Machine status. To achieve this, we can use controller-runtime’s `source.Informer` type and client-go’s `dynamicinformer.DynamicSharedInformerFactory`. + +When the Machine Controller reconciles a Machine and sees a reference to a provider-specific resource, such as a KubeadmBootstrapConfig or an AWSMachineConfig, it can: +- Get a dynamic informer for the provider-specific resource. +- Invoke c.Watch() (where c is a `controller.Controller` instance) with a `source.Informer` for the dynamic informer and a `handler.EnqueueRequestForOwner` configured with Machine as the OwnerType. + +#### Object References, Templates, MachineSets and MachineDeployments +With providerSpec and providerStatus no longer inlined in the Machine resource, we need a way to ensure we can continue to provide at least unique status information for a Machine per bootstrap configuration and per infrastructure configuration. + +Each Machine needs a reference to a unique Bootstrap provider-specific resource, and a reference to a unique Infrastructure provider-specific resource. This is easy to do when creating a single Machine, but not so obvious when a MachineSet is creating multiple Machine replicas. + +For the MachineSet, the references to the Bootstrap and Infrastructure resources are references to templates. When the MachineSet Controller creates a new Machine replica, it retrieves the Bootstrap and Infrastructure resources referenced by the MachineSet’s Machine spec template and creates copies for that Machine to use exclusively. 
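As a loose illustration of the copy step just described (not the actual MachineSet controller code), the sketch below fetches the referenced Bootstrap or Infrastructure resource as unstructured data and creates a uniquely named copy for a single Machine replica to use exclusively; the `cloneTemplateFor` helper and its parameters are hypothetical, and a controller-runtime client is assumed.

```go
package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cloneTemplateFor (hypothetical helper) copies the resource referenced by a
// MachineSet's Machine spec template and returns a reference to the fresh
// copy created for one new Machine replica.
func cloneTemplateFor(ctx context.Context, c client.Client, ref corev1.ObjectReference, machineSetName, namespace string) (*corev1.ObjectReference, error) {
	// Fetch the referenced object without knowing its Go type.
	src := &unstructured.Unstructured{}
	src.SetAPIVersion(ref.APIVersion)
	src.SetKind(ref.Kind)
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: ref.Name}, src); err != nil {
		return nil, err
	}

	// Deep-copy it and strip the fields that must not be reused on create.
	dst := src.DeepCopy()
	dst.SetName("")
	dst.SetGenerateName(machineSetName + "-")
	dst.SetNamespace(namespace)
	dst.SetResourceVersion("")
	dst.SetUID("")
	dst.SetOwnerReferences(nil)
	dst.SetCreationTimestamp(metav1.Time{})

	if err := c.Create(ctx, dst); err != nil {
		return nil, err
	}
	return &corev1.ObjectReference{
		APIVersion: dst.GetAPIVersion(),
		Kind:       dst.GetKind(),
		Namespace:  dst.GetNamespace(),
		Name:       dst.GetName(),
	}, nil
}
```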
+ +#### Controllers and the single responsibility approach +Each controller internal or external to Cluster API in v1alpha2 should follow a single responsibility approach. In the scenario above we go through some of these details: a controller should only modify its own custom resources, but it can watch or read other objects. By following this principle, we aim to reduce or completely eliminate the problem of having controllers modifying and fighting for updates to the same resource. + +#### Remote references and accessing a workload cluster +Part of the workflow shown above is the ability for the Machine Controller to access the Workload Cluster’s API Server. As part of this proposal, we want to standardize how the Machine Controller gains access to a cluster using a Kubernetes Secret. The Secret should be living within the associated Cluster namespace and following a preset naming convention (e.g. `-kubeconfig`) and RBAC should be configured appropriately. + +For the purpose of this proposal, the secret-based access to the Workload Cluster is going to require that the Workload Cluster’s API server be accessible from the management cluster with direct https. Features such as tunneling or other forms of access are left to a later proposal. + +#### The “Phase” field and its role +The Phase field is a high-level indicator of the status of the Machine as it is provisioned, from the API user’s perspective. The value should not be interpreted by any software components as a reliable indication of the actual state of the Machine, and controllers should not use the Phase value when making decisions about what action to take. Controllers should always look at the actual state of the Machine’s fields to make those decisions. + +#### Showing a status summary to users +One of the project’s goals is to improve the user experience by providing more information on a Machine status. In this proposal we add new fields (Machine.Status.InfrastructurePhase and Machine.Status.BootstrapPhase) to the Machine Status which the Machine Controller synchronizes from the referenced custom objects. + +### Risks and Mitigations + +#### State transitions are inflexible +To determine state transitions, we adopt a simple approach that relies on the values from a specific set of fields. As outlined in the state description tables above, a transition can happen if and only if the whole set of conditions is true. Some might argue that it’d be more ideal to adopt a more complex yet powerful system that would allow controllers to dynamically specify conditions that need to be met for a state transition to happen. Although desirable, this particular feature adds a lot of complexity to the system, therefore we propose to tackle related use cases and implementation work in a future community proposal. + +#### Machine Controller can access any machine or cluster in any namespace +The Cluster API Machine controller, given the design outlined above, can access any Machine in any namespace and gain access (via secret-based kubeconfig) to any Workload Cluster API Server. To overcome the security concerns, we allow users or cluster operators to optionally restrict the Cluster API manager, to watch and operate on objects within a single namespace, based on prefix, or a list using command line flags and providing RBAC examples. + +#### Certificates and tokens are exposed in plaintext +Sensitive data is exposed in multiple places and could pose a security risk. 
The behavior won’t change from v1alpha1 and will be tackled in a future proposal.
+
+#### Bootstrap data cannot be merged
+Cluster API doesn’t provide an out-of-the-box experience for merging user-supplied user-data with one or more bootstrap providers. While there are some use cases (e.g. installing software tools, configuring kernel params) that would require support for this feature, we’d like to keep v1alpha2 scoped to a single source of truth and put aside the complications that might arise from supporting such a request. Bootstrap providers may allow for such a feature by providing additional fields in their own custom resources.
+
+#### MachineClass is deprecated and will be revisited later
+MachineClass has served as a templating mechanism for Machines. In v1alpha2, templating is left to MachineSet and MachineDeployment references, and the functionality from MachineClass is being removed in favor of the new approach. In future revisions, MachineClass might be revisited to support other use cases and tools like the cluster autoscaler.
+
+## Design Details
+
+### Test Plan
+TODO @release
+
+### Graduation Criteria
+TODO @release
+
+### Upgrade / Downgrade Strategy
+TODO @release
+
+### Version Skew Strategy
+TODO @release
+
+## Implementation History
+
+- [x] 05/29/2019: Discuss v1alpha2 scope reduction at the [community meeting](https://docs.google.com/document/d/1Ys-DOR5UsgbMEeciuG0HOgDQc8kZsaWIWJeKJ1-UfbY/edit#heading=h.s8py1oplak7i)
+- [x] 06/04/2019: Compile a Google Doc following the CAEP template [link](https://docs.google.com/document/d/18JOgoK9EF3miEd2FWvIggruA-41jL8fLcEhiVzzwzjQ/edit?ts=5cfa7379#)
+- [x] 06/05/2019: Present the proposal at the [community meeting](https://docs.google.com/document/d/1Ys-DOR5UsgbMEeciuG0HOgDQc8kZsaWIWJeKJ1-UfbY/edit#heading=h.pj3kmzqnc2qa)
+- [x] 06/06/2019: Ask community for feedback
+- [x] 06/10/2019: Open proposal PR
+
+## Drawbacks
+
+## Alternatives
+
+#### Object References, Templates, MachineSets and MachineDeployments
+For each reference, have a template reference as well.
+Alongside Machine.Bootstrap.ConfigRef and Machine.InfrastructureRef we can add two more fields: Machine.Bootstrap.ConfigTemplateRef and Machine.InfrastructureTemplateRef. The Machine Controller is responsible, in the pre-Pending phase, for turning templates into new instances and filling in Machine.Bootstrap.ConfigRef and Machine.InfrastructureRef. MachineDeployments and MachineSets should only permit the use of template references.
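To make the alternative above concrete, a hedged sketch of what the paired references could look like is given below; the field names follow the text above, while the package layout, JSON tags, and comments are only illustrative assumptions rather than the proposed API.

```go
package v1alpha2

import corev1 "k8s.io/api/core/v1"

// Bootstrap, in this alternative, carries both the instantiated config
// reference and the template it would be generated from.
type Bootstrap struct {
	// ConfigRef points to the concrete bootstrap config used by this Machine.
	ConfigRef *corev1.ObjectReference `json:"configRef,omitempty"`
	// ConfigTemplateRef points to a template the Machine controller would turn
	// into a new instance before the Machine enters the Pending phase.
	ConfigTemplateRef *corev1.ObjectReference `json:"configTemplateRef,omitempty"`
	// Data contains the generated bootstrap data.
	Data *string `json:"data,omitempty"`
}

// MachineSpec fields relevant to this alternative.
type MachineSpec struct {
	Bootstrap Bootstrap `json:"bootstrap"`
	// InfrastructureRef points to the concrete infrastructure resource.
	InfrastructureRef corev1.ObjectReference `json:"infrastructureRef"`
	// InfrastructureTemplateRef points to the template InfrastructureRef
	// would be generated from.
	InfrastructureTemplateRef *corev1.ObjectReference `json:"infrastructureTemplateRef,omitempty"`
}
```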
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/20190709-cluster-spec-crds.md b/vendor/sigs.k8s.io/cluster-api/docs/proposals/20190709-cluster-spec-crds.md new file mode 100644 index 0000000000..458d8143db --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/20190709-cluster-spec-crds.md @@ -0,0 +1,233 @@ +--- +title: Cluster Provider Spec and Status as CRDs +authors: + - "@pablochacin" +reviewers: + - "@justinsb" + - "@detiber" + - "@ncdc" + - "@vincepri" +creation-date: 2019-07-09 +last-updated: 2019-07-18 +status: provisional +see-also: + - "/docs/proposals/20190610-machine-states-preboot-bootstrapping.md" +--- + +# Cluster Spec & Status CRDs + +## Table of Contents + + * [Cluster Spec & Status CRDs](#cluster-spec--status-crds) + * [Table of Contents](#table-of-contents) + * [Summary](#summary) + * [Motivation](#motivation) + * [Goals](#goals) + * [Non-Goals/Future Work](#non-goalsfuture-work) + * [Proposal](#proposal) + * [Data Model changes](#data-model-changes) + * [Controller collaboration](#controller-collaboration) + * [States and Transitions](#states-and-transitions) + * [Pending](#pending) + * [Conditions](#conditions) + * [Expectations](#expectations) + * [Provisioning](#provisioning) + * [Transition Conditions](#transition-conditions) + * [Expectations](#expectations-1) + * [Provisioned](#provisioned) + * [Transition Conditions](#transition-conditions-1) + * [Expectations](#expectations-2) + * [User Stories](#user-stories) + * [As an infrastructure provider author, I would like to take advantage of the Kubernetes API to provide validation for provider-specific data needed to provision a cluster infrastructure.](#as-an-infrastructure-provider-author-i-would-like-to-take-advantage-of-the-kubernetes-api-to-provide-validation-for-provider-specific-data-needed-to-provision-a-cluster-infrastructure) + * [As an infrastructure provider author, I would like to build a controller to manage provisioning cluster infrastructure using tools of my own choosing.](#as-an-infrastructure-provider-author-i-would-like-to-build-a-controller-to-manage-provisioning-cluster-infrastructure-using-tools-of-my-own-choosing) + * [As an infrastructure provider author, I would like to build a controller to manage provisioning clusters without being restricted to a CRUD API.](#as-an-infrastructure-provider-author-i-would-like-to-build-a-controller-to-manage-provisioning-clusters-without-being-restricted-to-a-crud-api) + * [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints) + * [Role of Cluster Controller](#role-of-cluster-controller) + * [Cluster Controller dynamic watchers](#cluster-controller-dynamic-watchers) + * [Risks and Mitigations](#risks-and-mitigations) + * [Design Details](#design-details) + * [Test Plan](#test-plan) + * [Graduation Criteria](#graduation-criteria) + * [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + * [Version Skew Strategy](#version-skew-strategy) + * [Implementation History](#implementation-history) + * [Drawbacks [optional]](#drawbacks-optional) + * [Alternatives [optional]](#alternatives-optional) + +## Summary + +In Cluster API (CAPI) v1alpha1 Cluster object contains the `ClusterSpec` and `ClusterStatus` structs. Both structs contain provider-specific fields, `ProviderSpec` and `ProviderStatus` respectively, embedded as opaque [RawExtensions](https://godoc.org/k8s.io/apimachinery/pkg/runtime#RawExtension). + +The Cluster controller does not handle these fields directly. 
Instead, the provider-specific actuator receives the `ProviderSpec` as part of the Cluster object and sets the `ProviderStatus` to reflect the status of the cluster infrastructure. + +This proposal outlines the replacement of these provider-specific embedded fields by references to objects managed by a provider controller, effectively eliminating the actuator interface. The new objects are introduced and the cooperation between the Cluster controller and the Cluster Infrastructure Controller is outlined. This process introduces a new field `InfrastructureReady` which reflects the state of the provisioning process. + +## Motivation + +Embedding opaque provider-specific information has some disadvantages: +- Using an embedded provider spec as a `RawExtension` means its content is not validated as part of the Cluster object validation +- Embedding the provider spec makes difficult implementing the logic of the provider as an independent controller watching for changes in the specs. Instead, providers implement cluster controllers which embed the generic logic and a provider-specific actuator which implements the logic for handling events in the Cluster object. +- Embedding the provider status as part of the Cluster requires that either the generic cluster controller be responsible for pulling this state from the provider (e.g. by invoking a provider actuator) or the provider updating this field, incurring in overlapping responsibilities. + +### Goals + +1. To use Kubernetes Controllers to manage the lifecycle of provider specific cluster infrastructure object. +1. To validate provider-specific object as early as possible (create, update). +1. Allow cluster providers to expose the status via a status subresource, eliminating the need of status updates across controllers. + +### Non-Goals/Future Work +1. To modify the generic Cluster object beyond the elements related to the provider-specific specs and status. +1. Splitting the provider-specific infrastructure into more granular elements, such as network infrastructure and control plane specs. Revisiting the definition of the provider specific infrastructure is left for further proposals. +1. To replace the cluster status by alternative representations, such as a list of conditions. +1. To propose cluster state lifecycle hooks. This must be part of a future proposal that builds on these proposed changes + +## Proposal + +This proposal introduces changes in the Cluster data model and describes a collaboration model between the Cluster controller and the provider infrastructure controller, including a model for managing the state transition during the provisioning process. + +### Data Model changes + +```go +type ClusterSpec struct +``` +- **To remove** + - **ProviderSpec** [optional] _Superseded by InfrastructureRef_ + - Type: `ProviderSpec` + - Description: Provider-specific serialized configuration to use during cluster creation. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field. + +- **To add** + - **InfrastructureRef** + - Type: `*corev1.ObjectReference` + - Description: InfrastructureRef is a reference to a provider-specific resource that holds the details for provisioning infrastructure for a cluster in said provider. + +```go +type ClusterStatus struct +``` +- **To remove** + - **ProviderStatus** [optional] + - Type: `*runtime.RawExtension` + - Description: Provider-specific status. 
It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.
+
+- **To add**
+ - **InfrastructureReady** [optional]
+ - Type: `bool`
+ - Description: InfrastructureReady indicates the state of the infrastructure provisioning process.
+
+### Controller collaboration
+
+Moving the provider-specific infrastructure specs to a separate object makes a clear separation of responsibilities between the generic Cluster controller and the provider's cluster infrastructure controller, but also introduces the need for coordination, as the provider controller will likely require information from the Cluster object during the cluster provisioning process.
+
+It is also necessary to model the state of the cluster along the provisioning process to be able to keep track of its progress. In order to do so, a new `InfrastructureReady` field is introduced as part of the `ClusterStatus` struct. The states and their transition conditions are explained in detail in the [States and Transitions](#states-and-transitions) section.
+
+The sequence diagram below describes the high-level process, collaborations and state transitions.
+
+![Figure 1](./images/cluster-spec-crds/figure1.png)
+
+Figure 1 presents the sequence of actions involved in provisioning a cluster, highlighting the coordination required between the CAPI cluster controller and the provider infrastructure controller.
+
+The Cluster and provider infrastructure objects are created independently. The cluster infrastructure object is expected to be created before the Cluster object, and the reference to it is expected to be set in the Cluster object at creation time.
+
+When a provider infrastructure object is created, the provider's controller will do nothing unless its owner reference is set to a cluster object.
+
+When the cluster object is created, the cluster controller will retrieve the infrastructure object. If the object has not been seen before, it will start watching it. Also, if the object's owner is not set, the cluster controller will set it to the Cluster object.
+
+When an infrastructure object is updated, the provider controller will check the owner reference. If it is set, it will retrieve the cluster object to obtain the required cluster specification and start the provisioning process. When the process finishes, it sets `Infrastructure.Status.Ready` to true.
+
+When the cluster controller detects that `Infrastructure.Status.Ready` is set to true, it updates `Cluster.Status.APIEndpoints` from `Infrastructure.Status.APIEndpoints` and sets `Cluster.Status.InfrastructureReady` to true.
+
+### States and Transitions
+
+The Cluster Controller has the responsibility of managing the state transitions during the cluster lifecycle. In order to do so, it requires collaboration with the provider's controller to clearly signal the state transitions without requiring knowledge of the internals of the provider.
+
+#### Pending
+
+The initial state, when the Cluster object has been created but the infrastructure object referenced by `InfrastructureRef` has not yet been associated with it.
+
+##### Conditions
+- `Cluster.InfrastructureRef`->`Metadata.OwnerReferences` is `<nil>`
+
+##### Expectations
+- `InfrastructureRef`->`Metadata.OwnerReferences` is expected to be set by the cluster controller to reference the cluster object.
+- `Cluster.Status.InfrastructureReady` is `False`
+
+#### Provisioning
+
+The cluster has a provider infrastructure object associated. The provider can start provisioning the infrastructure.
+ +##### Transition Conditions + +- The Infrastructure object referenced by `InfrastructureRef` has its owner set to the Cluster + +##### Expectations +- The cluster infrastructure is in the process of being provisioned +- `Cluster.Status.InfrastructureReady` is `False` + +#### Provisioned + +The provider has finished provisioning the infrastructure. + +##### Transition Conditions +- The Infrastructure object referenced by `InfrastructureRef` has the `Status.Ready` field set to true + +##### Expectations +- The cluster infrastructure has been provisioned +- Cluster status has been populated with information from the `Infrastructure.Status` such as the `APIEndpoints` +- `Cluster.Status.InfrastructureReady` is `True` + +![Figure 2](./images/cluster-spec-crds/figure2.png) + +### User Stories + +#### As an infrastructure provider author, I would like to take advantage of the Kubernetes API to provide validation for provider-specific data needed to provision a cluster infrastructure. + +#### As an infrastructure provider author, I would like to build a controller to manage provisioning cluster infrastructure using tools of my own choosing. + +#### As an infrastructure provider author, I would like to build a controller to manage provisioning clusters without being restricted to a CRUD API. + + +### Implementation Details/Notes/Constraints + +#### Role of Cluster Controller +The Cluster Controller should be the only controller having write permissions to the Cluster objects. Its main responsibility is to +- Manage cluster finalizers +- Set provider's infrastructure object's OwnerRef to Cluster +- Update the state of the infrastructure provisioning + +#### Cluster Controller dynamic watchers + +As the provider spec data is no longer inlined in the Cluster object, the Cluster Controller needs to watch for updates to provider specific resources so it can detect events. To achieve this, when the Cluster Controller reconciles a Cluster and detects a reference to a provider-specific infrastructure object, it starts watching this resource using the [dynamic informer](https://godoc.org/k8s.io/client-go/dynamic/dynamicinformer) and a [`handler.EnqueueRequestForOwner`](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/handler#EnqueueRequestForOwner) configured with Cluster as the OwnerType, to ensure it only watches provider objects related to a CAPI cluster. + +### Risks and Mitigations + +## Design Details + +### Test Plan + +TODO + +### Graduation Criteria + +TODO + +### Upgrade / Downgrade Strategy + +TODO + +### Version Skew Strategy + +TODO + +## Implementation History + +- [x] 03/20/2019 [Issue Opened](https://github.com/kubernetes-sigs/cluster-api/issues/833) proposing the externalization of embeded provider objects as CRDs +- [x] 06/19/2009 [Discussed](https://github.com/kubernetes-sigs/cluster-api/issues/833#issuecomment-501380522) the inclusion in the scope of v1alpha2. 
+- [x] 07/09/2019 initial version of the proposal created +- [X] 07/11/2019 Presentation of proposal to the community +- [X] Feedback + +## Drawbacks [optional] + +TODO + +## Alternatives [optional] + diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/YYYYMMDD-template.md b/vendor/sigs.k8s.io/cluster-api/docs/proposals/YYYYMMDD-template.md index 5bc6a579b4..54eed86100 100644 --- a/vendor/sigs.k8s.io/cluster-api/docs/proposals/YYYYMMDD-template.md +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/YYYYMMDD-template.md @@ -1,5 +1,36 @@ --- title: proposal Template + + + + +- [Title](#title) + - [Table of Contents](#table-of-contents) + - [Summary](#summary) + - [Motivation](#motivation) + - [Goals](#goals) + - [Non-Goals](#non-goals) + - [Proposal](#proposal) + - [User Stories [optional]](#user-stories-optional) + - [Story 1](#story-1) + - [Story 2](#story-2) + - [Implementation Details/Notes/Constraints [optional]](#implementation-detailsnotesconstraints-optional) + - [Risks and Mitigations](#risks-and-mitigations) + - [Design Details](#design-details) + - [Test Plan](#test-plan) + - [Graduation Criteria](#graduation-criteria) + - [Examples](#examples) + - [Alpha -> Beta Graduation](#alpha---beta-graduation) + - [Beta -> GA Graduation](#beta---ga-graduation) + - [Removing a deprecated flag](#removing-a-deprecated-flag) + - [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + - [Version Skew Strategy](#version-skew-strategy) + - [Implementation History](#implementation-history) + - [Drawbacks [optional]](#drawbacks-optional) + - [Alternatives [optional]](#alternatives-optional) + + + authors: - "@janedoe" reviewers: diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Dockerfile b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Dockerfile new file mode 100644 index 0000000000..ce99c6dd13 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Dockerfile @@ -0,0 +1,37 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Recommended usage: +# +# - Run an ephemeral container +# - Mount the current working directory into the container. +# - Run the entrypoint as the user invoking docker run. Otherwise the output +# files will be owned by root, the default user. +# +# - Example: +# docker run \ +# --rm \ +# --volume ${PWD}:/figures \ +# --user $(shell id --user):$(shell id --group) \ +# ${IMAGE_TAG} \ +# -v /figures/*.plantuml + +FROM maven:3-jdk-8 + +RUN apt-get update && apt-get install -y --no-install-recommends graphviz fonts-symbola fonts-wqy-zenhei && rm -rf /var/lib/apt/lists/* +RUN wget -O /plantuml.jar http://sourceforge.net/projects/plantuml/files/plantuml.1.2019.6.jar/download + +# By default, java writes a 'hsperfdata_' directory in the work dir. 
+# This directory is not needed; to ensure it is not written, we set `-XX:-UsePerfData` +ENTRYPOINT [ "java", "-XX:-UsePerfData", "-jar", "/plantuml.jar" ] diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Makefile b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Makefile new file mode 100644 index 0000000000..a6c8d412db --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/Makefile @@ -0,0 +1,32 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) + +SOURCES := $(shell find ${ROOT_DIR} -name \*.plantuml) +DIAGRAMS := $(SOURCES:%.plantuml=%.png) + +diagrams: $(DIAGRAMS) + +# --user ensures the output files are owned by the user invoking docker on the host +# TODO: Publish docker image owned by CAPI project: https://github.com/kubernetes-sigs/cluster-api/issues/1070 +%.png: %.plantuml + docker run \ + --rm \ + --volume ${ROOT_DIR}:/diagrams \ + --user $(shell id -u):$(shell id -g) \ + us.gcr.io/k8s-artifacts-prod/cluster-api/plantuml:1.2019.6 \ + -v /diagrams/$(shell basename $(shell dirname $^))/$(notdir $^) + +.PHONY: diagrams diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.plantuml new file mode 100644 index 0000000000..7504c678ae --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.plantuml @@ -0,0 +1,58 @@ +@startuml +title Figure 1. 
Cluster Provisioning process +actor User + +' -- GROUPS START --- + +box #LightGreen +participant "API Server" +end box + +box #LightBlue +participant "Cluster Controller" +end box + +box #LightGrey +participant "Infrastructure Controller" +end box + +' -- GROUPS END --- + +User -> "API Server" : Create Cluster Infrastructure +"API Server" -->> "Infrastructure Controller": New Provider Infrastructure + +opt IF Infrastructure has no owner ref +"Infrastructure Controller"->"Infrastructure Controller": Do Nothing +end + +User -> "API Server": Create Cluster +"API Server" -->> "Cluster Controller": New Cluster +"Cluster Controller" -> "API Server": Get Infrastructure +opt Required Only if Infrastructure not seen before +"Cluster Controller" -> "Cluster Controller": Add Watcher for Cluster.InfrastructureRef.Kind objects +end + +opt Required Only if Infrastructure has no owner +"Cluster Controller" -> "Cluster Controller": Set Infrastructure's owner to Cluster +"Cluster Controller" -> "API Server": Update Infrastructure +end + +"API Server" -->> "Infrastructure Controller": Infrastructure update +opt Required only if Infrastructure has owner ref +"Infrastructure Controller" -> "API Server": Get Cluster +"Infrastructure Controller" -> "Infrastructure Controller": Provision infrastructure +"Infrastructure Controller" -> "Infrastructure Controller": Set Infrastructure.Status.APIEndpoint +"Infrastructure Controller" -> "Infrastructure Controller": Set Infrastructure.Status.Ready=true +"Infrastructure Controller" -> "API Server": Update Infrastructure +end + +"API Server" -->> "Cluster Controller": Infrastructure Update +opt Only required if Infrastructure.Status.Ready is true +"Cluster Controller" -> "API Server": Get Cluster +"Cluster Controller" -> "Cluster Controller": Set Cluster.Status.APIEndpoint = Infrastructure.Status.APIEndpoint +"Cluster Controller" -> "Cluster Controller": Set Cluster.Status.InfrastructureReady true +"Cluster Controller" -> "API Server": Update Cluster +end + +hide footbox +@enduml diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure1.png new file mode 100644 index 0000000000000000000000000000000000000000..a033a71815b4fff4926617b014ce86037032623a GIT binary patch literal 60035 zcmce;cRbZ?{0EG3yBnw|g+s~~LJ}t{A$#w=63(HFW78yLuZ)92RyM~Tk-hggM#|>c zdp_4Ws=NFC{eI8u`Rln~ukM5MUDtJe=KKA=zCMcblEh~z&*I_X5lcOKsEmhqS^*F5 z)ac&?;4d3ZU0&dW#a>*~-o)mqGtBItL`Kj^n2(m3yHSK@>9S_dgI! 
ziYuHw&B#@?M|M7vXyCkLAyDn4l5BAtB^yk zU#<{3Oym2Ao0Bk)whoI&KAb-o`@*Gk)eUnqS<7~Aw_Op6pXr~RDoRMo-?DY<-Xg_w zt?^6OxQI@Zf(21uSlv%OL0hhR@V%x3brErb%QSUNATXSIjo}xn2)cTPHEFdP~Q1w@sUbra*0CVgb7xI z^1D>+9^cUykiuT7-Xm02tv?nbAOsmxlX1jiHARLM#2=7xL&}fsU>TaaRCjn?UP6aX{#sew$-f{+#55QX|Wl zacVrnK5EMQ_7ZL;W&lvVEM>i;tj^ohH9a2StP>yRn|0EvgSniw(>wJz+;XT54nMkA}* zr_DwJI*kINgOHp)xCY|^i29V+Xfp=-*S`~K1+vy(Nku7%$)H<>5YsK)N+9PK!8pe* z{CW}GIS+n%v9xFZUzEB4qZHm7-vNGz&ELSBQsWW}h{R?vwUO`lo(8aM3JP?q{(1jf zQ4owEWO%n4na{*pc5aak@=cS5rZ&@d)X;1Bqm^C)i=`>Lr`Ap7@Sz!%1;bF>+1tBm z^JY=hgaAi5NB4rnl*If{7>Q7dfQN%NIOpimA<4T!sW&!ScMcc6VCG!Xkml0BnWKHtTrNWFRgoZUEjUCd?s%bBSXp7a5i~Q()QVILjQyUqS+A z1d&0&yQ9rN!@Em@1a>2(u|^SNjUr=Wz*TG_o)7;KxDmJP8|8`7(VQ#8;XPWQv4FU| z@ho(7EFu=#2m#HccDi4uz*6MFl&bgQw^6s~%2hQ1m?(1L!Ub${z#SOAQ)wojDqj+e zlw0V4G3F%AYL%i0CZW{J6x`x#fCR$=?1PUV^C(wN47wmCWvrIay9itGmV?4YTihO0^ zCV2c2;ThYssf~e_R)A$%hK(uj3o$nUs-G2BGoq9l(%&5z#is3<%37?55p@VK;5dd!F+bP&r_!A`^o(!Yv_dGM}I8@HG-&`AGt<9G#CVn&+2 zI7mq1)45Tz$7E;QL7pfz`GT=DDJioKfl~5HO2{j^WMI(E?ky2+Hj(*!&K&tFuN8bR zcNueQroElLt##$xSoW$feB?zam_yWB?-K#bxL|(6I5%7xuxAb!<6I-TU^%=6Da?RD7a` zFfMWt!(Z}VojE6NIB{e!gmYCHF!CId>_aa472gXJYGE(jSxV>g&z0cG`%~lN6Z_%+ zM=-gZ#&jZyef#zfaD&qmIl3nZ^4ZJ$Mvi{*-KUZE?Tt&U0v5L&&PZ=e(k>OKKnVZe zad)|!`3{VO={Uabf5&t*vI;g{Idi51dEfZ)^R&0K)ASoQooI_^*E=WfVjo#2BF{i9 zEzM2Lf2N)^^RZD(&@uf`SIQlEV};an5~bLWeXIqt}c3)-IC z_x96eM#|oSeJ<1k03gl0NuBE*jm}(u&i3eUZ(_j@?F5mUJ9s2Me!emdU$sv2%N zBW~xA+VzmhTrO#W08Z!SD8OmPqWb2o!Z8day8@G-VQpPgKk6(rL>|mDOg-!{1SH(W zix@kHvPrY%yOsT0DWr@d)lVmkEpCQFS4KuUx-rmJaPdYfg=wBX{Ww=mSC{wCN%$>b z*xq`;&a5dzF|MMdix&o*uW9B)GJKqvTh; zhU^glfZ$M&cQ@AaTWd+BaqhB>fNH^6x-6cc*hR;eST5Y_RO4sMcP=brn;8!nRrfN| zWsU_U`yHQ9l^rqAxfU8b@Qe>C^2P4?^7q;&*d)~X^e%#HGuh^}Wxu}t{Kn-@VDapft?3DKn@NTneozs+#wQZPO7UH?W41`@nj!LH?0r^LTw zrRMRv=)<&|EWE(_QMkh+7|{YavdDS@=RoMQvsAF`g=Uzs)!dt@VhGe_JvL)#NY~&X zD{kzN5PK0V>t(WM`6(2>m1t83nVA*bU&>NTl9QAB4u<{#nu>)^;SGMDI6Z2 zsIIFkED$A)&TMIMwl~vOtVO zjXsZAU`+FA3g+c^afr04zYu2pdYRj^A4P3RjXt=MF3&8iE`1)J( zW{{goZwf{Xhh$XrJR}ElQ$Qasb>J=tL&J{kLD%hn;sjxl#Zcb8bT|S-GPtcN5fSz>3VzDggAsxh_CbK)!qBx zUht38Ihu737uVg#m{ELEUA=@F%9o#skVYKsOXu=O!;}l;wn@S?#4&~Eep{+tQJCb^ zmp|T9S5oId8hvR{SOV7SZ8b6k+P;WKVg^g2lLOoQltKnpFbKbd=e9mCBaiw-stQAwMAWWc4)W{(uTC?0r<6gCGT@w|vwD6;(Bm9Qzg~p6(kMn}T4F zXbc)tXojMtB(;thYE%n_uypqJag+;~{i8k(W7$vly-~>x6u!Uz$aW+3N9g^HGXZ?z7-6tb_WB1=(Y0Jd!1b`T z247c<>EKPlysv{c8lk37m)r>J6VE`B1(D4Q#B6~WBQr>s&{Y(>^7Y~Birl?(Mu}lB z!m~>A7An2*@57#soa8P~uivLf6}n-nHkW=Q@#4y17}kWDo0ov72@XP$<`|dNKEf*Y z(i&Sm#ZFKq?LY89qC+(7u7TrwMp6^a`Te7X5&_!ko|C*MTZERZ4U7lfFP~Rc+(YQ1 z5Gt)4h8_mY;l#ifLCzKa?iaA5qO^7&%^hqIpX2?^ka8qYjBs6a)a1N^&~J~;Tm$DO zCWD`z@|ZMj&~C*&HiM0v_ffiSRSrW-m3Tq~rB}Y)o22n&oBN^5GM~L!iwsnU>-J_n zn|QJ>3Wsgo9Q~zrb_|P7Qy>pi!vu;Z76th~Lib1@5~50uz=tj%nI#kM1+$`V^FM7g zg$bnNWlJ_I;I+UhZIY}?4%FDZX=BXl%~a*m+zJ^K2tWV=0tyiLhWtP0^-YlKQUPA> zga5SP1dZORrVbar|Aq>HK_3DJ=eTtR1g;`}{)cBIASdy&_ySA6 g{U84JGl2zK>(!iJ&OE(Ceq%=uEB*cCFTLyk3sjx{dH?_b literal 0 HcmV?d00001 diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.plantuml new file mode 100644 index 0000000000..074af303bc --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.plantuml @@ -0,0 +1,29 @@ +@startuml +title Figure 2: Cluster State Transitions + +start + +:Pending; + +note right +Cluster object is created but +has no associated Infrastructure +end note + +:Provisioning; + +note right +Cluster has associated Infrastructure. 
+Provider is provisioning it +end note + +:Provisioned; +note right +Infrastructure is ready. +APIEndpoint is set. +end note + +stop + +hide footbox +@enduml diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/cluster-spec-crds/figure2.png new file mode 100644 index 0000000000000000000000000000000000000000..8cdb33d48df1626f29cd2d0f3d779b8f2df6c778 GIT binary patch literal 17984
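The states in Figure 2 follow directly from the same two observations: whether the Cluster references any infrastructure yet, and whether that infrastructure has reported ready. A hedged sketch of the derivation is below; the phase names come from the diagram, everything else is illustrative.

package main

import "fmt"

// clusterPhase derives the Figure 2 state from two booleans standing in for
// "the Cluster has an infrastructure reference" and "the infrastructure
// reports Status.Ready".
func clusterPhase(hasInfraRef, infraReady bool) string {
	switch {
	case !hasInfraRef:
		return "Pending" // created, no associated infrastructure yet
	case !infraReady:
		return "Provisioning" // the provider is still creating the infrastructure
	default:
		return "Provisioned" // infrastructure is ready and the APIEndpoint is set
	}
}

func main() {
	fmt.Println(clusterPhase(true, false)) // Provisioning
}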
zn80XKp8TaB7|yN3=|YUadl*8J>NT~rEbZ;HQ&Sas0L=|*M2**(O-gTSdOA|p6P@z% z^2gU1p!x#(@Ym!-L~H+{LySiJe!4pqeeU8YCDjQG;mK>>_Ju0*a5Yv=Zti?$kNexl zS0Suui)Vkp+IpOvW(q+6Ia#DZtcoXla(YT72qjBVud1r@z(Ran?{ao__O{E_Xh2)a zv08>gh({${shTYwz{30GiK@qQmdD>TIXOAKeEG7;?cn<4hlKM}B}GM>h57?PvyUMS z2wZjz@Mxo@{u=!2i4I&~%B!oZog5vnfEOKLVPZzyyzF!t7#v(^^Yw19TLPBjBex-D z&C5r>hDFQ!C?C`#)sX}YJHWf<<8wS-?e1U$-ifd(k8Eyk4hTk7Reun3yun8W`4B#~ z+Ac=7&d^g5>fF#K5!^n`#C|B9kHN7s39%^D3Z;{NLWhkv8^#!0lkX Wq7Sd>2HxfJL_tnPwp`jI=>Gy1G7gyl literal 0 HcmV?d00001 diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.plantuml new file mode 100644 index 0000000000..2fa8d45fe0 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.plantuml @@ -0,0 +1,61 @@ +@startuml +title Figure 1: State diagram with a generic provider + +(*) --> [ Machine Create ] "Pending" + +note right +- Bootstrap provider watches Machines in "pending" state, + generates //BootstrapConfig.Status.BootstrapData// and sets + //BootstrapConfig.Status.Ready// = true. +- Machine controller sets //Machine.Spec.Bootstrap.Data// + from //BootstrapConfig.Status.BootstrapData//. +- Machine controller can now transition to the next state. +end note + +"Pending" --> "Provisioning" + +note right +- Infrastructure provider watches Machines in "provisioning" + state and starts creating infrastructure for those Machines. +- When MachineInfrastructure is ready sets + //MachineInfrastructure.Status.Ready// = true. +- Machine controller sets //Machine.Status.Addresses// + from //MachineInfrastructure.Status.Addresses// and other fields. +end note + +"Provisioning" --> "Provisioned" + +note right +- Machine controller watches Machines in "provisioning" state and + //MachineInfrastructure.Status.Ready// = true. +- Machine controller sets //Machine.Status.Phase// = "provisioned". +end note + +"Provisioned" --> "Running" + +note right +- Machine controller transitions to this state when + //Machine.ProviderID// is set and a Kubernetes Node with + the same //ProviderID// has been found and in Ready state. 
+end note + + +"Pending" --> "Deleting" +"Provisioning" --> "Deleting" +"Provisioned" --> "Deleting" +"Running" --> "Deleting" + +"Deleting" --> "Deleted" + +"Pending" --> "Failed" +"Provisioning" --> "Failed" +"Provisioned" --> "Failed" +"Running" --> "Failed" +"Deleting" --> "Failed" +"Deleted" --> "Failed" + +"Failed" --> (*) +"Deleted" --> (*) + +hide footbox +@enduml diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.png new file mode 100644 index 0000000000000000000000000000000000000000..cfbcfac994cb63c0ff9087fe7f73d3e9fd239dcc GIT binary patch literal 82569 zcmb5V1z43`*DXwlAc%l~lyrlHq;ze%TUt^Yq#Hp%knZjV0cq)n?(P=p?yj@I=Xu}v z`_B2!`F}62OWf{#?|aQP*PLUHG42348Bt_JTtp}+C}eRlhyoN8%oG$9tN;QGctyEH z+XDPWZ72NRPT$Jf+1${`4ocL}($GfN&d`8J&zZ>B&d!>Xfx+5b*V4}3!kk{;$^wm% z<2AU-jfv8GyMJDXf(DmyN~%!P6_HkyV4+iw(}le;1XNm)+D zEnz&bS?#B=#_<*~)4Ui?gI`(tmbCa~oZ2O1H7A>2yihp;RWjDk<~y2I*lt8 zT||Fqhw8l3MuS-G{u|3MBYK?A8h_Ys>2AKat}hj4o6ck=#xkF**=yS` zYUbjfXWq@Q>N=02&HEg2Ot$$@h_ifeU6s8-V z$J9~spB7%zn}5dS77}GZ8K?e*dPHGBY3t=s=+oOU2EBG&u6LI1QbTLfDx2ERuwZ9I z*jqXI5ojB${yHVu9mcT^HxR>&NBohdpjQWxxF!hT`NfDvynTL7e;z<_~AsX5a*jG7|PRXlXYS#*{?$;pRfEWlOaErT_)f8)@*pVYiON$~C_ zD%p(yr8!Rteor{FoG2$>Ceyp4d3Nvi z*@}IApP6RB$Yyd6D$4XyYGY|{f6SdXkr=->YwAU)vb>cfO+v8vCxLL{d zF}RJm$;Z$=>5!!qLM$gElRAULLO&JOOSOs?F5K->k*QMR7G@IhW6=%qE24!q%smv; zXDD%qfRfY9PBQ#^%+8rkOw>|n1q@7RE{8YrO3x9#;bIP<(w1gq=wY$O#5paGL-a2* zOdQCcC_#pa5QaZMw^QMAWprw``5_}^IN3JEO6?i!r_A%fg~8t34_+Viy(nki@7vkC zF>)M~sk!GzeIkJR1T(TH{TUI0HxUAi|DRV-y?Vqt@2M-k=sJkT@8Vm~Xm_B@zf^Y* zwp{-91l3Do7fw>k&vQnn(am{tBpV;xoK9o?f=mjx^GIiBuH}4fPEJlpNC>G&&^w2n z_s|S(!jr~OXuGq}L@=mcP$Y6DP~k6EY6v)X78+)|y2NVs{GOAb@;`A$hK3L&O||D3 z^(WA&S8V)l4|GvYcRpUL&>u|U0YBGAtG(Cf`=SuY&$>ET2owowu`2i+lRvt~$>Pb$ ziO21ga=t?Q%#6#;#UX=s{g6|nCX;}SjEuFlbuccgl$6v(Ql~wPRhran4t_QBx?hDAM$;&7jpZu}3VL^Fo#g!r#$$JL zcULomhl4XQw}30D*ylPemLG`OJ>8iu4<{8(NKBlao-WdENcc_2>-NrkicwNhlE!kO z-r4i!@T(^4Gw6JQpr9Zs^jD*`_GUfh(ur*5k-s;a#?+z|8h-c4V&*4giiT2jb_H|T zu2KC=FU{E+naaZD(!!&x>=tO4gruyjuF5x6m`#S%XV1v{4G$0ZeC2UwwVZorlW#g+ z;Njsx6D^^iUZUNgUS*DBud~_{osWU&cG!+pqSKV)X=rE|e&5m2;kY-479*T3*G`Qd zBehc1p~$~~_OLSnPtaBYfQ76u5f1v@N(+;)6e6~tDowU|&Q_$oEA+OYuXedGHy5UJ|Fjdpd5t2o1O^vvU zW=-hL?XBig zf(!2FTBi!N_S+pq`a@{~6*V@i61r-5Z02cQYUM`xbw!65RASfsP-tDh_#u0U*vdNT(k7XoLM~i`lcA%i||3EH%0PXS=hsKhrf8!&%t!*^-NQ zvd%d2&J%_rF=*zzdS3hcfD_RJ1coFuga1E>lcWvKM4xc=h@I z;^NOwnKd;W`_rkOck#+9?_Vj$kT7V~X6=C_Nh8{Yg@&ZPd`2Rjz>VBkb&>Gn7;k80PGmGnsBx)I$zI3`+JM#d8#6PrNhGIwFRV~Q4vC4mf(576wD_*A+&*hM z<&OO1`J!jZD67Ejl-W#0zF>NhdSylz$m7DrgEZT3^M4Ul`%Vc=i6$m6T`DQX8XLnJ!u_~}gjdx5j18bZfkL%CQIz?N z-jLPf#>uyj`fdO4<_wLU#zyWHy^E6EEvJUT z*Q7(U&cR})P^CznWkgQu_Ud%ANj8;t92PQ$ulVk1lv#lpyvg55pEn7yP{>yxM?_cO zw3C)ANr;b+7fe6V`G&!>YQQb};z$1=-+dC>`e5oH+cbU}C|FyL&D4+So)`5Q!I%8& zle;Q>E!|2S<|$u=eZrSmzXMNLR7ag$zerD4)(2-f!r8;289r3Da6A_!m8MkwYLijF zk*w@&92}fr?6=-}ekg>NdU{7Cp0|O&XDiKe!!20}FncVGzvp+nc**)F`8+u4Sy9n; z$)Ax_h=)U-CrKipA)If%6fjUbl|77&vWE-zu0VO{C1n12i-9>mj2-dt@Mh}mw_nXl z$;eC?^?eu&P*hO(mVw9_Fpq~d@MU^(#JILk|NHmvR7N1!D1^LOPWHTVi^8FZTUW*+ z)3g>>GG#cdDpf!D4Zz{u(Z;w%Mn=9JOl|V}Q1W(U5tJ%wgS>@bE)PZZUnz2HBNyuU z=~Mc_)#Hf~O=f?(rmeAK1plF_ggPs2_A%>isymay|k=3kY^u30LqLjJ$PSgFZ)hopY z7yEDNW+bkK;Z0#BKLQCb4Xm~f9f?^RzZdL8v$<1F2jS9cyeE2zgVVh>2f;kwo6n4^ zbKF;$YH1O?#cAqzI@yXG{O|I)75l~ICRWEodBCWa@if2*J`cYZH@?;U7QU+Sp!W_vkIvC){fd$spK)dlKmtFhL^7v6jZ&Um zzC!j|f8t==+vT|$TVZ$-u-ysZe}Wy9USmoKk4DR!y!0?Kq7YVazx>9wqo{=;RNf$; zDKe}ntrT`ef2DlT5sb&n%d1?Vl>WUV1i$b`fH_#(WQVJJZ6wYDthx!4@a4-E{V7|V 
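The forward transitions in the machine state diagram reduce to three gates: bootstrap data generated, machine infrastructure ready, and a matching ready Node. Here is a minimal sketch of that derivation, using illustrative boolean inputs rather than the real Machine, BootstrapConfig, and Node types; Deleting, Deleted, and Failed are reachable from every state and are omitted for brevity.

package main

import "fmt"

// machinePhase mirrors the Pending -> Provisioning -> Provisioned -> Running
// path in the diagram above.
func machinePhase(bootstrapReady, infraReady, nodeReady bool) string {
	switch {
	case !bootstrapReady:
		return "Pending" // waiting for the bootstrap provider to set BootstrapConfig.Status.Ready
	case !infraReady:
		return "Provisioning" // waiting for MachineInfrastructure.Status.Ready
	case !nodeReady:
		return "Provisioned" // waiting for a ready Node that matches the Machine's ProviderID
	default:
		return "Running"
	}
}

func main() {
	fmt.Println(machinePhase(true, true, false)) // Provisioned
}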
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure1.png
new file mode 100644
index 0000000000000000000000000000000000000000..cfbcfac994cb63c0ff9087fe7f73d3e9fd239dcc
GIT binary patch
literal 82569
[base85-encoded PNG data omitted: rendered image of Figure 1]
literal 0
HcmV?d00001

diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure2.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure2.png
new file mode 100644
index 0000000000000000000000000000000000000000..f1f67067cae8760a1b1699f52a7cd0ee9d4e1a7e
GIT binary patch
literal 60153
[base85-encoded PNG data omitted: rendered image of Figure 2]
zmW56ImBpmVIm7JKW?a+B0m`+CC-*(@A&V14deNd z6aAFF_cH~j4iF%1`lJ!dmD~e*orvt=40bQnxbCnP-W0R+%7 z7h>C0v73{n`lpI>j%$MhVaIur>n`1j;Jr^b31{hSp=2k!EC6u&iOJqU zgDYtFGJ^PC$i>FU8NBeUWYck0o)~NY_GR801R-iW*~xIGaw%Z^lwb6tXC6t;l*gv^ ztmMhYTo&3!Xp`ye9VPx~ANgh{Z~lrsPIs?T#D;`PQb9B6^0%Nca#$}? z{i>sS>DkW2r6cohWixMI0(kXmS?2Lx_+8hqXS7`{qftY49*gS1nK^*T{SY0-)04qcyc!C zC!x61(uSdQK9Kx)>j@^Y#dGnuYCrWT3Lt&_Wh;<&m@>OG|3W8*Aj5#sB}4Dz`9pmq zZBVT90SCu-8|*6#7szU=>_4$O6A<{-lSrK2U$4shCsOxZ45V^^cMrIe*bmZW*W=s~ zzP}dHuIjVY5h~xDENB+=PXM~rWSF#^D&g&61BBCyg6mb=5fihOLlUWR`qz=Z>n?mT z4HV=E&$?snpvYvD4L-gX1I*0e!=qTASXbn5HnW(syNqO2yM#pKj9*wpC*8Uq#c?^n zVTx79UX|X(zKA+idZUZ$&>RFqYq`JVwBUDk70o6QHHx5_X-Y=M)dhu3^X@( zl1?U@LL3Jh{vOKxK9pp>Z3|R zXUyIL3Tc0uUkpR*5U&|5*D8)YuTkWxPDMs2xVGM;Vl})I5wWa2k5&r_({1*uKa~Ap zj5+Az6|;TWU|HH!^K1k_SPz%A>mBlpS)f1ZqVDW&raZNjrgcB{0=N#_3c!(O7Z%f> zTB?16tdHhpb04HT^gfc8xC4E?!vo!GgI1m5WPF@Hk@emibhtn;VT8epZ>bm;&JNea zY?p%~j7M*gFw!5WQY|vUHAvFnUWZDk4ssP?TSdHGp{+ex1;7*7i1l@M+b2~{a`60Z zP7-edt5`!=Xcg3G(omKVa!Y>dmg3) z?k$2Ro`ZDhHdhrCgLJMoJ;sD{+aOHp`AFK$Y{_v%gpBJDL+z7q*t-wmN;M*I&mPw1 zw<_Sr;oBQ@`D|y_MQ(brSLf=x1~tF_wrh{m#&7QSbbV|<&@8yzQ)X)51bT&@Y-vd4h$OUDOcQNS z_c-ac>#ts{B!KrTNeI!HrnQd|j=4)FZ$n3z&@!S$w=x_|7aH7J!JLdIEm}u}u7kC_ z#E9HWbJPW^n-+%`Y8S=NF6S`UG?J`dQ%>S$%*!CcW#@~B;oUnsF;T}WMMsdsRw=s1 zBGnVY0h~Y6K?Jor5T$O^klCdIPEc;%&7p&Ms&p!JFGg_t%IG&|>zM8a>yXpjuRYCc zuQ5;nwFppcl5ApQSU3SQUf_@2ncC9E_59QWc0a>+M#9El?(UHG0W>#lnJw4%p!=HP za8b!>++@B3%qbn=;MUOc&8BkZG-g(`WB`aX=vf^u^DTCDUFUeZrxtPETpk!?PE=3> zUlPF(-(r%_7}LT1aGLfj7@e&m`-S1pdEkbve38U1g>s(2SwGFOcbe0JA3M>1T8Gx> z#V_sZ;}_U{N(g)4Y`CCvX8TrVPBQgSKifn4wuU$s3&z70|8?Ziu?<2wrfvfSjmLv1VdK(Xk6vB|EV z^GL?4`=)oNTf~YV)BJkL#b8KT1ZFbYW;$Y?P*(id1-_G`ihFQq@XVd)w;K`k!R{`x zHPMrB(S)s(^LbRzp|YJ1S96T*Mr22TsT^T?6jif(V>T$~5h* zLGCOYqNo20R~?@MsCRRnlL^>*H>h*OO6WsUXa6>C*LPGhMN5_%_N6bi9w zf){!4b#z70Q9bllP@eDYrp*VSM=o?%^;VO%$J(Gi_d+kKQ^oMP4-Z~p$>IpK6?qQRfxq3nQJ#S9B~L-%Wd{n(OrS4qRp+0$%Vu?HathIUu14 zKBIFhp}0qNJ42Rpm3@gf5s9H1m5f*K`H+yxiyVHlP9Gyx#P6$O{py7=vF*D=VodNa zle`l2PA^Lhx+O8GE5w7E*!(rra26FN>Jm=Q2gB?^X64fKR$Osr?O8heqos2xlWM_F zo%ch`+M_Wz(4jnh#A<`-FUW^^WB=e(3;`J49hqU`Rs51AvfDq(B>6;3 zm@b#`F>e{|I~V= zK#!YY_7ipS?AY6WSoSq8f@`-V>^BraUc>5ht}Wh?ji~4@e>0)gy~SWA7*o4FvkVSS z&qj@OQBNmuYbP7X=M_kII((Xml>hZnP|$d@_sT^Q%hQ zPg5=(U%LdWcc{tp$=t=6gIQa*oNV;k)m7_Losg&MCss;JX)T1VH9p#XkTJMfFh>#o zTn%Et=I^-+60@I#30cEWC z-Hu11OnGGv zZ`j}v9X;q`QoSW5F$fjG)=?2apjA%l;d`UcK`*39*37)TY8Jfz7_|CnvDHrd;PmSP z87mJ85SSc*!&{V^HrAes2APVUoUEG^(|tXz0wGDE&D|2IsC9Y$G*1V|q@Qcv-){B= z8_sfPVE76SaPQ#nw*@Zq?B+2&^|BLI5)Z&8{ow!u$=yuY$C1t!9bG@X6wsxf?!X-r zI(`t4d|ziwBSE}g_=-A`hJ{255QY#P*^iG4ZlBNhg|!oNfG~>?tg{CFN~%RnAO@7+ zpg@>SIgZ!j(3dUo99Q$A&kH9rm@`4V&^dZUdsDqE^h!GkXTQE20EBX->G<}1h^Wh4 z`Pg3CvCn?%iBAY)e=iXjQ75s;hU3G`p!vM^Ekz}&Klsk>7{|+7P>-=I##HurQ1141 z2uV}RMlq%bh><#m2h;KHc1N=m*PUF0D&++zAul0}wsH?aac^(fsC#KqhEIxJ&)pse zXKYXLV6FeYk>p}yj(0z}5ljQBmJyw*)3h6mKbWCWaEy2y( zU6Wx^Shdl)u6fnr#RG75L#u~=Ayp7PtT&3tZo6U2=dUXK1#h@$TIl70s zrRi+(E1R>rjAkI{UHkiQc1&FT%qI&*_ba|)b8>Dqq2PvRyn~16Obr8~7wk2LRWva|t#A&cT8Xy>WsMP&>&_PoK|snZ%bWD)7bBX&>@LQgMAwGt<0EpnJ*+4|8#GrN zMC`J5M}&vkj!RzFiv3n9O(s9H53FvUS-fr2F1$UGq1H})-cEbzd*@!V0HV3f>i4j5 z6#gw5MlB)@@Mq(aWcnZN+VXVN!ERyvjOI~HFAaWkvA;AtiXIH$RC6~luWH8IrWrjQ zAxt>_>Q#Rek;ctUEpJ?M>7yay=h#|@Qsg~^ZysszZ^7jHQoT3d*b;d}p$8^)*L|F3 z;yysf7bm|foxa{B^MO*=cQG}qXk2ZLR}Q*@i!^@gx||njANMN2qYn**ZL^7W-j)d3 zciDM%1dHFho8Z(q#T$Qe3=w?@CbEyH%W#A9d+~=DFHT1fst}~U9()Qt?UlC|>N+r> zE`ED?*Jz$m=;1CslZM1jul(CsC$xxRFN05B_~Gnia|PrSvIq&pdwf9IDEi`!VKg3? 
z>-0E!4(X^&Kwm^Hd*LaeQ!tSy6OCh_iGEP|SX-NPA+i5t$~fxNEjn2SX5#bR76{ye z@Pd{uwx3iFQ#pD=&u~<89ScF-9=;ILhkVPdZB2{0pJ*!$@zS0R= zQWWkhm+YY|x}FO~cTbMZq_YUTa#HVd-r(NvT%m9UpZMFmj2S)6 zaQ~hn51*bsu)~RhI+uB8sRP;7&ja0}GmHIbe(LatMq=DNhhgWC-wapr60HUmUet1K zI0Fh6?hpGc*P2jbeEUXkZsiEOku>obuiR=Qq6C9EF0fO%qkVQ(D`2#ZAA zoQ4its)B-1-b0yq?2NLv31m4jVSF$4CKy)_Q`JRvBQ2HZY%kZN?nG6K$90~4Pz$ z?60>-fQ9RI9;qG1zn%YIiXbZz8{H!IL>^02#qllhHMp#L5XDYRjKlX-T4;U{av{gY(*uhFV1K(b(B?AN%)|N1P2 zD*zz;fsU{LYBR3^v&VyfvLcw%F3F1pf07UPC&%I^HJ)e55?Ix*B-EqkA^PZVpG))$ zl+zvPKsH5R8NYJ^U$Px91xRTyHRfaa+!ZkWxRFi5Rp^I!^^|MFsvh6C6n;^-#8bmR z$MTsi&kJZk!8SZ4E)9?Q{8jz`*jxvYe`$ZO%Jlb;K7IwHYF_OBaQHjZWO0AEoK|#1 z@4O5U4WUjEQH_8efcyRwpoqAJ%!RaF`AVrVSNUQMW6EOn%ITtcvB!X04~WL@`(vSe zL2$QrYd}y~0we@zMAEctZFHw{^#%7IzQ%p^;J%8jd|hwiSrmM`K?Wfzd5tK|*=#=U;&Gsh?###ex^IDu_2rfu- zcvV>!_Xc@1rs7&@I`BB}W%oI-ZP#bZ1lN2v)N@#Scdho*z56qn&*KQ`BH}`V^S>k7y|}-!el;vZqBQt^=Ekk_WV4Lq z$zmV|9w(+39v(;*>=M5`8jIY2_X@xAW8{-s9mnZ6|D>cI24NW*3X|O;rd^JdygD`d z{4feg8wp;^^=?%qP%eJYpe~tOs1^-fXPfmq<&J$3Mp=KY4%2lSd9jnAZDa4o8H#zF zdtbdsV{r$1sEnF*913*tKN2p9# z;80Z{-psVfw73H<>gH>@aw(S~ILE>L*_nWN36db}_i(rU%*Epkj|MBC6o*^F17y z^iJ%@Bl_reaoEul=RH$$-PC4D3PFjDh~um2YRl`C%O`lmabh7X_(YxfRzQ4w8d zRc13nX^j$!ST8&ipKV zbmJlzTFEuzAVVMiYZeH^5_G}NxMGkin5vMy+aoG~v*PdNO{>D9dOi7>`XFF9zR zUpcZVm^JP26&DSO)p!4fh2SqkoE5n~y;`DYr~MfbmH|H#hxFV7uc%d~PnaJulRzFS z=wG(K8uUcAKds<*RAOCWDLbP*OV9m`AGeELo0BpOxuU%_w>*y(WbCBQm5(vjHFvLP z$`Ty$MzU^vJ|=d~jvWp}vK-xONc6;Z&MF)UMRN7Vptfu0l~W+g(Q?v02li1?khci} z_7b*L!3VR6AIEl7js|cq73gNepkwSK1-Y+5==t2Kef!L8iNeGaBqtZLu}29sckke5 z*!ip3mO5QyYt%i?On7^$P?ICOhzXHblF>_lGDP_1T7YX^K!py65c;0&tlUGnOS>;| z%2+NwmeFzWAFuAQ`nHCWt(3l;I(0D6*}oYQ@uKi&bBNb##GxrTRUlXI9_^V?;Ieq; zv)7%s1cEqv`+7$yVUK#TJJnwHBlCui!XkLZK1cG8_!K#P4~FJS0+FbEHTAjI$ZORJ z^|y-llCP;Hu0-rUQ;_unbW#^49}UG;0fy?veNY^^kw zy0bE8@o34>QHH(#xQJR$E?8se+|ZI`j|lqN?>b(!AaR%|qNA+Fk}`ABehl{%a-bcS zyRH_O!-{>;u97Js?p3}`FsGlLrO4BKx&-A7cqAUD#FbC4c$RdqK$t|?EJdB+6{;xT zsj`@m$?H{w%QbgmdNev&L~LTL(h$dgGt4lJGLF+(!zbJ{?j<_x4rd%M96z>d#PTz7 zCmknw_@+i{zOBHxSuZskHKED6b^<8Fht9P(36ISt)rk?STY)a-fOP5tvgR7|_!UV=bWByGtXvTVAOFq zZmsPl7$xl+40(rmQGW*}YEz$^qT*1#Gc%fzg%t1Ro8HrhRP$6t+|chx$D?xo?ok9a zOgJ=!Wwkudr-TZO*q^ziQDe`wZ;4`HcZn1y!PkMLv6cIC4(1^83HC4=FF~t#?v%ZG zM-}9Im{VrPkfi%YI-$QH$Usi>-l&!8&6ZjB%u6bX4hnAV8m?UO!T>j! zfisSIJ1a;C3%txwCIs_fC$#RXllQWcigM+_dK2brBaNUbYfreJuv!9{vB~X#y8N|F zhy=Z7Amzz^rpz4?Rxef;!fYtyjScC{P0mtWTHTR0&XO;9K1CY~6n0Y~0|&Py#^=YG z*vVi^6q?jHeL0#|^!tt-ohAg{xuxYA>EY!XHTLcdfTpgB2^((bP!xcO_*#}CQ)C?R z$rTG%w=M0~(#LxBti(E!thMhf-0*OH%D5Kdb@0qVS&bD=Pqq@L+}fL56qSA-_t$)6G2 zACGtt4!TERy%#8>$($RnfW!;p2omjrYp1Cn3(Z)?qLNASKdi@wLVBy)q2lfcFL#bh zyeT&oH#dWyrysu3f+B6^8tMTNI)|}xqUDxg+Mp!nLKUyq5(w=L1%gm1W|Vym5()g` zcVumhB6kvZ094F)a)h^d zm2aS{0L4iopk)=Rm;5bU6z*-R(y}c9TQh>GZ+Z7S*?LJ<8HYP)fYj^On8SN^%Tyf2|Dy+S*J}TnthzbYCGxW8ThkqDPF&ZTA5Gl7y`9>5pJyn$mXN?&Ww5Vf zX5BuF?ochdRnrJ&pp!4s%G=&K22i~H1!!6B=QKiP54}Yh2E5*jC=F_Q=4pEc~Wbi zSwfV|D2NK3)SNFQn~)KGt?rWI$8~$T>c=qGXAtx>v7s4#Lyo$$^riLX7 zEPynIJ(KLAA~dPv$Z%Tk6}OVnn_=%zT*p-TL6d8?i=ZXTI>Ea#*k72$mYhbA zgg`4tgng{0OG)kyr`!lGH1W&*P0|bHGV$25BzFY^D8z$gO{4s0hvcu|C$?^Eu~hhO zC|UcY1rzdW{ApCL0B+mN?c2F$ydNGi;f)KHhfAn}+>;ay$f=TEnl0%MEiVlTMGtVs z@Fll-zK#`l_AnM4-)!kndr?6|gUII&khjtk38G-KT1qMYEHx)|bHes(I{J{LVMy#jfC^a32lLM%&-L zxAV=u5ABdtg$_Pf9M9%T+3|hj#WE!B(7Rg>Sf> z7rz*FBNWvZxY5jIdLPY6-h6NCY}CLiSs=&gEn`uQf%}x8@srDyWkOR~8hwGnR96BWp(cka?^3c3)cm5YzCGW&9`t569r zsRrXo{^@seHWPETTWnn-60wsV_v(*t>g9*O7AiM@(kk3zr#sHxLyBD}sk)_^*=|>? 
z$$pS6Tlv*&f-9si))M=U%r8NA+(nBv9Z_odEGH4qZ7euHbGRViOgNaa;`|m0KXL%e zDs-fet<~U&89O^_yiXRQDa6&BY4A}?QVB0M$}>51U32HAy>49l)RRXhUU4*HE>@$w zmSWzvixQze`6>xr=tSqzywX72oP?{K8AQb_UHz`zP{?o9DDf}3yAdu7>k1PvclmW( zp~}N{o_~J};HWWZG~4iv>Fr|E3{|5;0#ck;Td_@7`}#?Hn?~zX??0iN7B?>vA9wta zIP4c>-pf>fc}1A^`-YxK6ObWVr02xfn6(uZEWJ$R(!8rTc3u2KycFujEkk#uK=*mn zOb?OU$r(o~7Qf?}7L~*{EqwySOSf7t5+;}IL)#B-^Ftr^(Q@o}b=*EI6cUoqGbZzq z`YjC>F}pNi=g(ZHW2UL1B1EtWC3zsw3ovJYLT?t<0QE4%=d6;Y3)kq9^@z3PZ5KDo zpGvcYr}?9ucZbElYP@+Z;lyD08m`()nEkl|cP>C8^q9Q#*T{ZpGumQUG9a3aJWN>{ zluZJpO`rC9ARd(nAnlG40<-N$of=&oglja0&#u$JFCRNeK8yN#P8|!nEkk6fL(*cj z8nH*Jt)O3z%6@y+*YE58{1NXWyOZS+UFs&T|!K46}Z z%bO59$sc|^Q(<8n;JZT>@pI%6l~2QDIJ}BXZ^(GFT2{KN#L zy+6IVINuc;?o6vNvqqLZhiw`M9I8!!yBztogAWwPH%=;xQ~9~bXO*j~Z{L0COr*6) z{L7wUJnZoUsS7^k!V@E;Iw2aiLk4JTR~~<70uH|yWFHl?Dc<`Dy#E9sxL2z0(}6eH z$Ukl|W-}E$q$#+4z3u=dJE%AI_uOIsmgbcDx zX1q4&Xw1?d694)j{G;#D|gZ5AaVtpAXKneAdOT z455vGaZR66{5^nd{<*%9T^E}! zOMm}=Y^YS%D%Ja*8umQ^UNt&zPCWe`0WiCI84~fT9xv-;Lj){h2VfvhbB(Sf+6w0f zfDopRO5baQ0l-uxKv2qET*9@|Cl0G{dKC}Mk#X@GW5EF4OPvAS0Ku+Ci(iksU`?Mq zWsCa&EI13GHF8*on`|WgdJRy~sKu0k*7pZW0S`!#z!wbaW7t8qFB1w{Z%tn~6l z0^B1$FaBCUh^FMx(MUk|zLu}=(ukC(Mu`eRAQq|-YPyLt=PhsEN6jB)gFGBAFt&ortzhD(g5bWt!KG=`i*eN znY+AnwYe8ryjnQ+eBRADlPv(>jn{7CQQm)Oc-32M-9{H@_~zyxBG-^FG&DxSuzieV0P&~Rx{!%iI{@(Zu- zdgE?0-G%}Ekt3kiuxSq>Ubo7gKZ^CTa+9fmL5(^;4_`&gF|NDJd%JdBeda9bKCF8y z;++Xp!wQqoQJ3g8);wzr4{Ua10Hk$WC)1X^u1ldD|A^wD9k~pS{rrBrK-ubM$iP44 z6;h7)ia?EIM|C7M|5v}k-XB9tMtFDVshvn0h{T_|?#x2BkS0P$EGR!9aVdr@29jHn z%i3iI&xS3LpkBuoc|sJ&;Oi5h9)++$RNSm{#{|%u8Z@{aBs(G$ zrm~RXKUnSgX^t*7$O+iZ?mm3*Kt?k~#`p9+gxF!dZVEK-CAcO-?*M?skE+|iQRFC5 zCkeziyDLQ(07gzdWb~U&;x~4<=4l+CR$Qjd3uZTd6~-R$LieO!BGS~em`gM7N3*zW zWCt_8I^I+|_s&veHDBy#>&uxN$jpM>SoET$bhPaLRVsmUSv)!=j^7v!M(_ zXoBltnh0|`6j(O))PjgLLBtbHpmx^ag{TfJk|DLS&SE3H6AiDmo&PfAxgz7ezS*t; z1wGKoBv%$@mZPVg^xytO$b%1*s?N7SsP!At>y#K0m3=oCLGpbBad-6Dv?6@hWt=X? zzrXe8d1eNSp%}BYFGn&RF*GnZ?kV@o7_dOEMZ-_=UjASp%puP?bw;*3Q@vTji z>f3fYBHa$Ya`Zq3Mb>cB|1ZwYGAyd_UH5`?BQ4z^-Q5TxDUCEEAX3uZ-O^oBBHi8H z-QC?i)LG-+XYc>{dalb4Bg{B!Eoa{KJoj_ozc<)w`^DI3>(koSv7j5+`0g#ZyW(c1 z*N?fkMsLnet57PmW$QR zPkHZKt~u08flx2{paY<=XX}l!p`kqUt^tCkZ%kN(9Ja_{s$E4A1(ylk$5%__&oh^7 zG#olxM}8BstD4a%MnRzEgqkbBYpPwdSoiDAagh|qtfTZ-XKK~v7#O*NtT0SB%_w4Q zvmCsZvAef9EV$Q12g@pQ#jTok*;fNWBz=Ykwm^>|ARj!q14O1p1WzgpjFCvhkz$E=j{zp0`{e+ww(W#l*>~~bk~_3| zU8j|recJ$b68mv6R0XyMAN;S~MgbuXSLjeKE=-=cd5L;RSQO1Xyxkp`1-n57zw2jERRh=B*g#|ZI0$Z~JZk?*Vr^$OJ!Fb?z3 zg|;35pSJOf?vBbp7Ph8+gi1mUiiA&qtAOYPPrvCMR+BYnf0qAWNTG1Z^2LQenEgVrMQkGb)4klU zHm{lKET5CwrBbo4#o zqT8+RG_hcuZtX%7@VCHk62_*&Z%vapc>EPmZt6mW#o>8Ubpi@Z$Zz>KnuLY&EZzVX zahrYSGg!-USbHk0p`TXHEZ;BJqbkBZ6_8z0DDzpW!a+@Kxej)Ai!w(~^=L-|Sa+cV zbDA0Tdc+!}Kyf$YB>!QSa`yD2;l;GoyRtoVGiUDSqYsbxx3w`oQC<$Wx#7PhZ4y*8 zO_TV)h~p;}hyQ%Ond4cIB&YtCu~!e>Yov#yl3$%nVy6SwBj<(R=7pMgS0r`KmM|Yn zRA72TEnEhIlrZ1rq3c#%&3}5{D}C7Os=Q1wxgkJV1#bBb5TobVgXoKG2tn<>OH*B3 z(P=%&YHKnckHQ|lG=})&w#2H-UV3@dE5r!50Rl7Wq)GS9$cxG*sNLPro1d}vp6h_y zpUWS3^S6;nc>I5d!TLpUc9OAzBl!>iWW7OzUmxI2AhfFQu&kL;M@^^>x#D!RklF{j zAL4MOPGUyvjzL4g-V@vKIeuA-0U`m@4Fr>Q5}R(!jp=l!RWq#^A=zpB(`7z7$vBD= zFn*PwU*d2xZdgF$K&sv>?Mh;OcqF8v%c?;%SkDF_YMfsQU8dA&-1ABeV||88zzw^x z*2^|=FF$XrM$1P{Kt2K>5;?1LMyZ@Fg4V0e%@q4T1Xq_y*-RSR$S;-z7!1r3`X+12 zh#}?P-q-lN0)JkNprKxy1cUWTVdg-Ri@(K$vhECuPY8j9neG^)7vQi0)6Eu5>x4PI z=6nj=;qKY0nz5eKhZ!<)Xjb8o*f3v7nF>#e&jg^vmxTxW73W%Oplll#iQ_twpY#&M zLqLbZuk7ZjGGTULKfT!M3*yA4-l{lya{j) z|LLQ}7@#?yRP8DuS7h9mS{Ah?^Yv4fF`jrH!{;pel@9iA`AeHKgp-Z4ZsK;uV87L? 
zQ9ntM-(8jV&99k*+G0Lpiync($>p4yjZCLZ`_Ik<9E8%v5+#w>!bK<5dNk;u&ueSn zWAhd_-Jtu5|6r|QOIbf_ef|*qKzvZiQV}6LY6sy6bYMrF+IoZ-@l-s`y=~0dSej2$-8TBYGg?Nfm)LS!~+zvW|5Tg*11+tK)JFPio!L_TU zPC<;DSz|5zvzBcS0qpoebx@>~={p4t#E#hErJ?*@>@}=8?@MpN!i#QDeGC7I_~4z6kMNBpk(J$&+P!0sJgazmX%1+Ky!V}Vr$7b zx%^wWQCQW3K_(Snplzg_+@8Sg99AgtFR%LFwYxgvcM9ziy@q#>Bj)c(W8bSA)B__| zcG;`L-aZqq`!D{(!=L}RqXLLP0MP5dIX|Kr{|2)bTLxqZ3{&)XC zIH%O|zpn!pGbmu?Wb!Wf4fzWE-#;~CfPunNhxX?GX>5@UAx*q5s)2PQyXEYMbOZ1hR@RsrnM~y^hGtDUdga z3u<>!t* z0I~Bl#Jmss&n8w3Siq2fPyF8$IHZyFafLL-dZoF&;aF4O^e%|V;`wk^U-|8<*PX0Q2AN6Y#ZhVO#da^fc483K=;HgdA&ZbK*19! zO**)O4+>U*zEY)p81O{w0@iMXxk8l!UzZgAi|ts&MxapQC_h}NDj?*u{igEK9)S2| z!678Q@a|{%EkhcVg)PAM(g3l}q`T}UC-22;3i<%4xA+^9u9^)`TcCErTSLr@J(AHn zMRhxEFqJW0qPPMY@I72cO^jY3tQXWM>;bO+$TPib0?j9Lf|11U@MDy)fzRv27U)e! z9L`pS_E0;|7YOYexXfo&-C6zMPzTLlfp!Ne6NpP_*e=BxGDcXgo{vCGG1PPNK8gCC ze0!uYop40}yNJ7p1m_1abm}YmlftApbctQZVm;0jse2s|2+dp|Ji|Z$Q(xu23!`|m zgv?w|<^)(r@Z#I%<#Y&3bqgzhZgl>XHyusW`Z1wC<~1eH6Yw|&IDiOG7hc@bA$XpS z*E#hw0|w6iB;*5*03K|L9DAoOR36?k=g!y@Ad9_{YTbIEUaCWIyPAZYu?C9d>%HLA zlev&8-7=dYoX>s+oFjb?sZj!le{xb*iZ%2`z3wi#t@j(*K)v;Vt7rrgE*ts#c+p3S zF8GI39T?onR~>#K2R@sJ)a8JL;RJvd)U4snn@yY5uF(=h*R+2Ssra3;0BLPq=Zyd> zAey)#JR+3pHUW@nR_-sh2)C`H5ZPUqw$xaRh8fh05#S7fem~7?(4je>=R)yUM-m~M zk^dIgAhFZ3VZnz=+FV~xO}YRtWhWqsq=ryIXX;mNv*Iw<@bdg9pa*)VFrl*mB#%E8i-P-$+4tqJ4SXHXw6T?Nda#cnkzan4l+MXdJhqjJzXP8datSdvOgl0PMy_? zZ>ywrKI!m)gv*rQ_D8%thvWnGP1V$Q_C$@5jr#%)9DNiC^}3hG%~zw@a>b+rnIxpZ zZHs%_2hc^rvlQi{Vn*5QXbj$VS$iC5laqSMiS*qOr?E$btdO2Fc5$4dSF}YSj^dZX z-@RY_w#7RVSW!L!_gweV!Rg`k$<7}YwEbFu-;r}9!Q0uK%G-u+jqq9!B@F(aR=lN7 zF^+^u{Nr~X4A58M7$rR&MaTT*KdGJ)=R0E>c_5UC#OF2g31M*s6qK08;1UYQg!^?s zd;(u}a)*b3J@6&=;VM)C-2>slLJrWzY8}2jexPc9jb5W- zqE!mesC&|7f9`PdyxbnxXi_*fW8CMC;*fC#+$FXzOv>Xv5Xk~${t&)EILhG93=w7e>J<+y;xzYxE(r=cu zSLX3LsByex7S4gJ+8mUxd&#OWsFdJ>Mssn$E=4?ynmraL2C%zGP7!mn6$d2N4j9aU zAhmnHwsP;a$Q{P1z`p5`6S!4E3+tNm`CAU!VixInJ~LL+UUzdn$@bRQUy(0&v4j$& z$;cpR!o}^3O`%etcxScpd*b%WD?BoF=yN;OJ>PhxVVZrd*FO-vxh6pKEP7u^Tk9Zz z`xukJ-rf_27sNswN`=co^#y?@p9)0X$%jo4>Q5LD@;Zk4d0co2Z$6IsGkbba^R&Ec z@iv`A3cZSOl?|mB`9iJ`cAZSNjg3V6cOFV1D~{G^EulrAk$S;Sk#`9W}uhY8Pe|MQnYit-E9eK(orV))x_=DR9kCN|5jOaN?NyM1#-iXb^ zOp1Dsm4ZM*M;{Q1Mv}sTcz$p2X-;zlI(Nl5i#=cCY(w zyUD&v(19sRk7m-h2;ses_HGoV$Ke(v6E?z87j(N%nH;7H{VAtWx&kCHMm`KWtikff zuTUUCT)WF8&_JnvN={AuDM$yePe-vghIx#yN!lcR%e@7W2W_3B(k?wj}=mHeY$Yl+aWgeI8dBU1ac)IpqRtb!fn<++Q ztI+k5`O4nJjI7a#yJu*Ok1`NA0W&A<+4CRK=m$1m>b#7c}1G^gGC?7m7dLc z)z-f{`;CqyvE>Y~!e9TobL7^fL4FR(m3TAGAh*nAxXJ8m73xsQlx&iX4AEshWRAqF z?F0A-_64!5QQ%ha=S=rDpN|)+hm0^2t~~!9L>$A(e0A zxvu(G4igyK8nB7Ge>j#$cov-mFSxm;^ZX#bQb7nYmzx~6NFGSi(jg&PO`b9szDBO~ z7tW!;G4vC=Uq$E6w-3pkKLw(_Ie$|#Ka6n$U#WFVg`TfFJ z>j-~QvR*^qgG+1eU0z&i6Fv2E)|X6!!AUD<8xkurM`49KJ-=_n@>cwS!KRhc3jeK8 z$@++rX8D`ZQPJn#-oktRv)?ad6&MPv(Rr{HOCP38S~|zSycQaHChfRjgEH%1nI73>{5D{ z6TRq}#QalB_1eNCX(IN!znZM4gxVu?&jvV&mezg1op1?<#kLv0&>a!6Rguug21jnY z9m|_1N4+JTZUb=Twi7$f8*ab_LRZsz^#0fqufUE`j&scB2hc!VlHPNoQID1)ZES>p z+twEI&Ov<68=52JN7Cr5!Ab4@8$OtUV%^HW@YRGl|5^->tL$g@UaQha#Q&f&ok`~& zuDXm%sqMkl-o{~jPD|A63}OVe6mB}JCEo|a6M!;~LTOj~Zn#rLw%S>(4#5|!G2Fc_ zycKRrv)Yq@^2UP_^4f6A44lHFm#EK0PWVM2`%vAli6Iqcm-|Yi>)*p#ok1+R;DLpV zUI3fa2hGGSve4R(e_&>9{gn^<2keeJ;J6A%9&on2hb46TS_d8j>OHtEai}k|*b3R; zcxGq^$#puBgv#(O;s|dZ;SARZMJ5pNFMM}3G91R%1#N?X`7LXxIqKdO~ zAHo)w*W$b+Ezgdb++6^e6`?AeE{%NOtFxUItog;~QM6I_N-sY-cj?D~>0%I5?gFtdJfvc%SpKAMWJNwM?x*%6{B_h^{cN_e)Fk^Xv(Rc+u^yi?Zve( zfqL3-cWRpZGd{;N2h90?EdOGQ>$m;(N}~2U zhKUY?G?x@b%55dCw2V442!-HXD8#u=zjs-EAyF50%k zPtf+tt}FK8>Uzmr$&6~_2fXZ)WqlJDjI_P={Sr+F7fONSaHo_b4oKKrJ;uujvI#xn 
znZb6w>sE)Y68kcYz2k?GV>T8FY4_xT*9~@iG;EFT%pdE=zUhh(G3bb#a>*mEngC`d zuk9dmBIn==)~@`+!o91@>)ERr%Z>R{faP`mH8SKaeu+E%1@8HDa8M%c*9`lSCf7+B z+5jc^ICA}Jz>C}9wCX~;jH2W{9eoQ^T^^Rri-h_%O+&W2Yl<&4L%3J*tf#t1MF0u9 z$H!=oql^)8!$60QTKKB5pG6CsM&{f=DZ~b6N`D8_N1dj&EqI zp7`hLZMMWcS0{1ad4749llXvwel1llE4~=j z1el(^;xl-1b@%%C*;9tvpMH)TI4^kwjXAD$Olc2$1U!Li)qNX?8F#YxRIf*R zUMB+WYCM!)ip2T9=mUeD%2J`gAB9FlRi&hNUr%M_5wR}!=YSCazVmr`hexy6Jc-e4l^D5nF#jye-Bl>Q{6fZqd z)ok`%{qFZU|FH{OCdLf_A_2NUVlUns;yb)8YZez?U%~v%eJ2->*wU#D( zaqP~KL>R+xHAGwD@}go6Y0DqH8Z9{uEo=EFo!ktk9a3@ip7EMcggBouj|%F)zN+jR zhA`TDBR2XOqqZ2v?Mqtc1{miyn=&h+16MVoVjTA-??yMF)G)c!UV9aJMf7!%_x+UL zEWAAdcvb(ZJo~NIUj{vH#v)x(+&NKnlZYtC@iR7#$@(+=syx}m-O65BVVK_HN5Ui1 z-V+V|d%CXOwu8sWs7zFl(E^t>Ss-YUDxF{cI0NhIcgwx7>78WnX2k9Gs#xkl#g{T& zzk4sl$nB`3k@rD24Cd$zr@D?U_!_Pa)y1|qakVbT(JA0Z!=N_O+C@wY?GiRFRbgn{ z5A9Qw+MrG>Va0ttM6O^P^I{#l(T$mP8T_+1ipv$}L!!>Uq@MR1ybTL|+{}v|L*#{) z&`+E3iM8W?H3+3mXV=PY7a3f{zdp2_W^>Xwzq@#I*+|@?DbwK=uGbx#mZ3kf?06sm z5No1YDahz8)lhG6 z{bVjzwx^A7!}>KM+X&`q>c@mb4z}!ozI_h?K_*aRl8s4<(3 z*u)o&{gs9plr#t|3v${v^Q)7anRDX8mx||hj3Db(Zn$CTCDR5ehd2_Nvv`zJgB$pa z8Msz$r?pFi(mA)jzB1<2&6Z_>NlhfNG~^huFHGe=kpEh13q|VdZiKWeUnRZE0~I`n zGDjAEe^C1eL;jiw$P}|XC9@wyy$g(9RpJac3915;KlvttAXZFhNplyC53HPO^w2Y+ zSxsl%ylrgHUar3bLibDaaW{+Gfxz9p?D)rsh zTd+m`&7>L7!GfvBlccjerFF0SSsfD}Pw?A>RwU_NTvOlVP66*$A^|9A%!<}WI&W+K zVGV1f*45WG3O&sS*Gp<_3fq9^oWyz4RMt!(ml;BEm!gy~_mDBX#uS$Pk6xO4ed9xJa$#6>>2q5D?cv>qsKCZyw=xI1hwCFiR z$o`ix1GuP7&+5e;OU>&4#mD}d49Uh2)TaP6Isj8m^Rx17snt`UCG_!6=|9^?6+(66 zL=(Uvd|7D95LqZ*<*xc=J=FqCVMZ8BjDTZuP#~LI+`1BR0bc^P1L3vB6YdE;Jg~P3kvJi0|+2{@5WWze`x1M;$P}13ka%sL6E~V!MDG2v8@wa9^!%6**GrD`!GK9 zuonv}yTG-wnwlg-@Q~&jE zN2E<_KiW>?p|P`WeK0X8>jg*`m%qx!9RX!(Fa89uSoEhRCV^Q2P~|UEz|UXE;q`hC z)WbooHz!MXXCXMk?}4<{{@h?Z1H*F6-!eEr(kXZ@{or37IOBP=;=8K)R{g)YeZwaG zs-|CC$bk$W!^d?9H#O)Ud&>ep9SQ41J0_Me=17l6FUTL^rooB9~Yvh{=3^~Vn!ghMbiFto!E&V7eSq4a-r%%I5 zJnjHMKZq+bdWg@+?w_JkqRmqWTo(AKi%R36WniR7Fc=-=3UOYnI&YyeoH93T$0!MN zCsZxwk>>V^r#lw_vQ2t*1%x6xhTlOG9@bnj`NI%;ws>&>A;DpyQ$YVTkTeNQU1RMC=Ikl2W{| z8@Pa2R+1{@Rtv!-`+boWzABF-;0TN|40q0z5dr*u#Cm#zN~?Vg@QPRT1!?atFM}Le zO-4TLBsm&#+iZ5{t^k0hpzC=9nzATs$F!WN^KUU^tLOA8`Bawc3p6a2GLr3B^^g*{ zw%r;C=Rv;y{N!2Vrys}+Vr$gP^wO{Mfyf=7_5JQ9r-Ex2D@=}C7tr@gt$s@(FsB8lzhij>?4d^U>fu0Bj!Ga|w3aS3vd!|6iu zbj0s3O))v{Ih*@}(5fSp=u2%Pl9X$l?KNaZHOrL4E_nX%- z(QIjXRa>XLnH#pYq;K)qDthjvs%e1#~ajYZiV(MbbpmLnhC5NEAfKF>vlR{>@rVi?$D^MVv#5~lkGLd)Z=TjhTnb!TI3(T{7YjE;C)$=}u-TCer zP!tLSU){v#Qh9%_dv$Px@!%l!47?TuPWn9&rdfHbHI<&2Lj_phjA`0oxb@1gRz)?v zL*!|;;-7XB^+`I3kBQtmVUx~H#M78An!91J-cRTl#=Jk;aLrlLqI-)qpY+M`a^UIR zk+gU?lXf;~Dg_)b?kr|xz`e0J8sKUn6er=8-tPdI-OB*sA)RM+>v({T9APy}LAVe8 zoJ^GhH%ugQ3h>^;BIs;Q;XPPd`CNVeH=}Q)8h0qA_H%b~{j?h|WtrmU? 
z_?TqXB_V^N_tzmas6hI3-2f&(VihiVD9M{V6fYqjoz?Y%KXq;{rNeHBit{o*K*q$3 zQaC+mhB|>!YZC%*QR2A)RVyU1c06E2j^Fxf zN0A#@#4acIcopfz_a13j3?YkglxVoYYOVOwOcd{h;4xbx>+Phry z{Odr1^;PVsAmgz;pkMx6Bi|Apk%woNiV6=Sr^Q~0OZAeuzPESa#;;n6+nX({&f?=p zFszH}ciWbg0GcEq>^$czhdcPGZ%u8~EiII!wlro;;Krv}|%(Qa6QTOGc<$ zV|x^n&Z2$tv%6otc)lv`x`WW=1`v=ALa8JO(8hBPWNM86aI0=4DxtUrSFkPMwuC8c zR(XLE%#UeThi2flR{~{Ps`VKfBSMB>f`Wlqo}HpMG3ZxZ&>O+$+0r~gDmB>w7^Pb} zYLu2<7|KZZ)h>d{Y=rw`kRBmJg3~Dg_eU|le3yFP{`XxmotUmXT#m8T&wYFrcYpQ6VF3lbzz#&U9k@SgMS2}lf-PFQQ z*%Gu&U%f5}kbZy7XMZI%4*jkAgKh>##Xm$QR=hMjJ3Hz7H&u-sI2ldOV9z`A@6(;i zvbeYZ>sndI__)Euc-A9RXrR(CtKaFF%gLFG*J7r}*xuOlW6Pc0+Su2z zz2mC8s?6u(JK;UPF~GrbHPD8bahH1A2-^aqiPU-JYW$wSLpF_H(98&~Nu|lv7MJBE z;qRm{|7C${gt!BZZMp?xrjf&x1c7EDY0I}qra6^X``$0w$FUI<3ZC8%7O1y#o8mCxl>2Twx zCXkz0t@L@X90?R+cLM)|GMz6k)5&M?8+1i;XCh{bd-5dedrA0VTO^_Pv=~#BNG3Wm zSMs))ip!c-d5w&U27%nPi~fkSi2G(?XNBeoiI%h!=OO^RY{|rDfO36o$(Rp@-wwcc z;J8)mh(AL(!Lf86*@~62^@VGEfz?Cz-fvy^;X8Qol@uk@-Sr=S$a=|j<2Epq)>Eu( zXJI{&^#&CDaDD&h7M}sOc)d4V#JIn3q>}g=^Y|)Gu~sFBHs*nyV7ndp@gQ~)jxdAq zukJQ3p^wJ-@BAL_-P1DQ^sn%J&U@`P;%bTwzo9k13QD=%%5AZUfELb&$W zQ(%k>Y}a7S9k>2PjM~907Q6$n{AfA+K6wY0<~t%D??+wJ-w(evJpM8+M=H4_J2lBGyF#A+gJ~i=tNb@It?DH7?T8Z;e4{EgQ5XVc; zqx&hj`b9&*1~$!N7|)J~ZN&tx&P_Q=1DNo3Un%c4HbI*Eao81P!nHtA7!I{z3U$cd zA4dYKKICR*GGCO_+tl#-T$iXayg-Sf%qBQX)93KQ;{dpMvNn^nh9T{e|IE2hV3K+5 zX@uT{?rH_mu^Pv5K3V~~Cnm*$-nj9^PhQ6T?Q@#lXrb_JMURu&ru@TE>;!=^Y7XlkhkkU3E{c#8aSB0+|bO9 z(BxRPKI}&JUZ97=qz;cNBp~G5{%&y|PJbCpOIC*d@-q75aptBEcIwmNRy-;j+L~~| ziJBth%Ve@Zs=ucs4faYpmU{ZqlO0s*-aeiy?V#^|(G!;z@kajS@7fPLnp&inyf8wN zmoIS_298__q^cK_UvXs7J`j_}oR~5!rGA?sHSv{Kk!Tk?oBnCX^zR_uB@MP%50lmv z$E3I5MredWqwt3K9V4Vqo{kbsNCgl-WDN{m5ePvWyi}$OCUBa~CNDjZ$=8}<-(J;J@j2s@sB7~2N;Xmc0(tUPum+?uiHz^3xBlP)6Dy?~D{ zIYyqNASU@x{bUuGZeyu_zinXbPef6g{br{X_H@ zRCxMC4;}ENX-RFj7PjrNHDZ?>7f#tvJ?0I#O5n$ZOQ+G67x6|?p(~}`<#IEb^H75+ zM#}bM+9O6S)1kHV1Rn-5Bo6Ok6A~a$NnEmlc~erix~QIcQ~nV1)|cG``=Mwn4dMGl z<`_n0=3^#ATu?Klm$0JTyU~By(a%rb*W2Tjh26|6!*=F~O+6ChJMz`~;X|C0lT+9N z+1kl+$PRUB^J5A2_cOrqE@MLDr5QCGZsG1&f*K^t-0-ba8H35^ej>_qnjPwKlL4#b z*$S~F-MGa;;l7{{riDl`yoaatB0@(OskkBAK)%w{bt&sY;Vrb?%RA8g_$_ho0lre} z3Lmj9hnj|UU+-dNvNE68m`*iY_5`Ef7}SA4xR&D>RVK?K@c*ElnR}pRhz4^#`?Ks~ zQ~rmrL-)ndoyd(@c76Hiya-&jwzZaG-jOSoZl+O6G1{tqBw5Z0)BeC=QmG(5%oyiI z!X#5i(OC7S@U=W%46JtMRr(br>*q|oEe`cGQ_SXE7FbD)`P-6Znl{IAG59>Ndw@^f z?ZfR47|VkqB+R&)@`*&0`wviaurDa$d*&|-6-i8T*5E5Grf1n?;luzz%Xa=YnXr3) z(bfFRh20l&lkZcsP5TVDl3D3sCC@&MlS!WA9Xs=NSmtXnpkc%Q)z|vO0N2TR$Kd`x zg1bD{z4t94-yH!1X68EyW)sD!JO2vO$94=pcUmgalqES~{Si?#msaNv-tj#IIql3( z5@^4fzCAQTb6>brUV=vx$vN|h@ZzpUz8&u-?cd^r#g{z2SfA9i&DRc54y1$Z3E@e?Qat`%_N_&FH9rLGS?C{HGuX~W4907Vy9|wRvIxB zKjM)#?)_%N`K`-rbxqs$UE$-L{o%Ohx!j}@-IgNVqW)IK0B++^3K|h|D{}{OA2kn9 zYeK0ZDc#pvlH`tmX)PG4NX!Jm#3I=jhLQNv+V5trP>^kEmlM7AV|HVb?Le_umMgAH z{rvQco??I71eefoa;IPbVeyJv4Ol?QSg8!uvt+#?{C7vevYAo~nm4_)JCgdTqcUJ| z&leAa<0&57339oKXskVf%4t)^d0P25$yjTQaL5sappL!SOGmTJ=Xd7UtYW{(2pE&D z$?$11`5$O|&O5?ML0w#>g@Oqo#_s^6k(!6rR?WSSF8SJ;B)ex8qPwW{4aiB6Dj@zn#%DQUjL z^HbTxyBGYh?#gg}iWu+qvb+o@3y=D%(iaxt6{g5*JQ*#z!_yhDk z2J`}rNEk~Od=ea<&re@sKfSVp^c-0j^D5`Agt6i86eWkJGJM&!4vya;Ia95KOB9gko zuoAj8YtxyKnf#5klDi!$kC44_q%! 
z=z{MoZ%h=)Y|NySMUEmyUoWl5!vHS#b+oXzajO+t3+onxyIE#UodKuTZx*5`qQtKP z{a@Maca9#*eH_f=LVg+QX0mpZ*iW?S!Ju6;(atW~==dN%!&64)G{5#wF`F-I-35-w zH~s=0(}676{h!&6lzTvZp?F@GtyDe%&m8Y|uRIEGKYAlKDb}tc$Pqw2Ne&BY0DI9{ zq@v9(T(TPsch~N?^&z0Vhqm*S#d=d~;@WVuJpo@O`v6)|;0W1Sq%lF6=wrl*;<$$= zA;Dv#?0cW-z8fb^(SJ!7)Hlh>0ovY)s&GCQCy6qwr!=AYuwaE~Ks7{&-)o5ag7hvea>lf2h}gyMq2)EhRxPlT>0()yYOd3{Rr6yhwE zapYZ;l;sprec}}5zr7C8pGQwH6AVh(C`WRb$_+g59}JS`@^ph8^1pWV4*xcB;|i=E zaW){t_u;5QYY++#>O~>IyeEgB(kE#%Yt|~8jVR|LWg)3EL{1ZNeXDZ5`-M8nn10Gt z;wHe+`6-B~52djq>2HNT5X}qQGU1Q!y=7)Jt}d&Fp+UM`PT@r}$*;VE#0X9Tg3A>;Z{Z1+5>}fx1#r-w;gZqs zQ}3()l33zp1pgL|^$s*hHQ-E?P}e*(k$}=6VWpJjtKBjp@We(*#4?F!WTgZrjvU1gQgH&;QQvk^g(lGl&mZm?JVn87X&aJa=39k{a2kzglsto{v6wjHGj;cpiZQB-N;-4YiP3l}WGoNXLNP}TNgD{M;%Csou=dm*Y#$Fay9rsY68y8lw-is&t- zH;pzB?rPi~@D$M4>g>Caoy5O-Rt#n;w)Tp_0u7?do1Y=IN{>Z z+erka2*B+xw7(O{(lR zsuLQ8r8Jes=xD}QMp(FJ#(wASV|%2@p)2vrx0ON+U>nEt!M2+BzZ4n>rP>kjeHJ7Q zc5qC&wKcmN5kDA}7}eMxOkT&styZ=jen?n%pdHbSSBCingU81t<4WxjgT0%59w;o29eLg0 zHs5cK?6l98IVNGz(xoB&i8?BLTb;*rJZJ#BM&D>VDUbSm3mInR#i3A!7K-Be?UuVq zr3#sBo2;_eph0;YC{r|@En)}e;AEFFl9z%wGmiev$uu`Rh7Vi4ZCA%yb|*$v`LNQJ z{}xb&%eu69+7JI$bXAq^ONz8+rTD`fvX#`@*LbDs|NMly|7J>Wf``P=-PZeYzmxz9 z=%GxApvJC8d2_LFY`@UF+5PPS>?l|ZgFu1ke3=#59=9+@GkBz|;QiSE!<;#mR}9Ms z8Ol=FTqb1U-CTM+dI$sXf7B& z^u|Q8?!L^lR{ciu_DgI5E#Vr4iZ}Pj%7<~FAi>O3KYW4mZ7e5wF&6e=Nfm|m5gSx= zx{BhB)DA{-XDiXieXD;LCA^&zBk2lu#?X8zOr$xFL`=-}AP}!QtA4y7(b*{BLrZ@Z z)oO9nI#bJlg2xvaBALj{gh^He&SzG3%2*I)vE&JVJ%FP(q zKFUY2D$SoH^@i>9!I)VEyU{y&G~Zn8GHP8yP_zeyC)=KS7_j$^IgbZrJSLIt;=YKS z)g4~`e#er0@eZHmt0AS!g9b%89wA7b)ljC5QuH&}2)A~IQ~#qV+_5tvvcP<$qUQ_S z6+*7EhhB_0DT}-hrezT`%a|e0Rz6z}7^{4d*IJS(jxH0J1W8-|g2(R+Ib6c? zf<1m>9w4Lequc8KP+?)%-O2%593anUrGj$td=y=ZZ~t<{?#vSi;xux8NN0}^tUs)qIZS07jYtrB6_BR!VzxJDL_W=V~#_ zV?S;F^#kV&H9q|gol55A*q1M)TG5A>xWZ!ZI!JDfoJh_;XS7FxT`~vRL=)OW+GDa} zUdCCDncse3-3;`xnJ5^(H+rjIucW&Y9CzEzzlKewk|@l5qv5#M=Y2iF&5v*GvEb91 z9Yq$+REzb;Za7ZWC8Nokst8w~M5!Wu79*sAuUfFWa$ACAl?S0BV^=w+I&m`TWmum4@1NUnV z;xDHLFQ;eu>km@56<3y$7Ezd*xd`@GWnr5iYmqQz=9}G5GF*+O=DDn5+q+wrttO+=Hz6m;T)}gkbcZ4>ev-dDubaRi4fF$$ol?>p=#F+cs zSYK56DQ}b0cvs98YL)$KcQxSe9_-K;km>kN321GUrtN*Gqi zzYxHtP&@hW$-v=eBHYfshx004k$@Z@6f|(Uuvtwqkn6YB5cRNTBao2hWgT8bQ}<^FtepwWiFM;aexYTr+FoxO0c%Az6J;3QTx62wc3O}3 z8{q(u&4=o<)<=auOCyRpfY3Vw=>`v@UiPCV`N{zYEtN5m*IH0FKdRu=|2!r71E?AAyEMN$DXWA+g0b<>U;wZAbtf#@^!>F5_fiVr%6N}o1~`NTe)vyW#a2wx7;YNmNuR8AK~i(y>?+q1kWmO&1?FZJFO)_ zS3@qiK13cIwjp37&^Zd7-{5b(z8GqgdHE0`GB|dk;JFEG!%hNm2uxi>k&=%%K&^dz z)al|CF-hgUGgCZ){Km{{AcneX<+}QM4M^Z^5n7aYID0<#n1Cnkb<@>BrGiW___6C! zmthtW<=2@5C@fAT<1-rix`0W@v1HJOLUrTQ*T*Zi3t->a9DaZ_7`rkrvUZ|7gU_DN z$96PWJb>&JaU}G9*-A zMuAYWDW&?6qjiTUoN)PYQkF%&(WNq2KZ0 z!F08a+5ga^1Qv`bd=l7Zm{6VmD{s6S7y=vVm3in6(jow=&rLe{ z0DD-feTb4B+ylz8lEm=WDeDuDql8CWu~!b%5sBKj)w#+enw+qd8x&@{rOkA=9n62G z5hpdy>tZxE%LIjniCbyV&s9fREhc@|J}uLsL+8WlKers&a0p-C=bV<<_RE{LD+2@VZ8YWH}E zCux@+8#4C!=Z3~U*VRd7;XzNOd?=_)z%37`&Lvk^wILbf{DL1(LR!eE5P7i`CE}ST zJz^7++eguF0qhTfm4K55Sd>eoJNhju2WYnXi0tJ4#){}|RN(H|6cn4huiLqVV=@aC z^+@Z}qb_W7(e5u-bbH95yLC~m0T|qMV98hE*M~7hXPuEP4iY96X(A49EHEh#<`!e! 
zFBEP2UdE593JKW161NDda1d1vgRk)q-PRuUj%@gM=4}D!c2rOoY1Der3`rC)*uB+K zQ;b5l^CU(H$bLVl8uj)Ip5TYDUK9F^i3YD&Xb$}9La=lMh{n+wzu|jTc_f&kvOEtty||M zYk`hJ0+YfBlkk7od#kXxmMv9K!LwICK2qo(_pLrG1zV5j?aMxbVDWxwRBn{wh8fq$u%?d!;$hSvkta z`j{U6?&>94MwS_AbiN%&Q1qd6K7`V};gWSq_L(U&{$$kI%=IwS)O)Op4z zg%nvoo0qx-sDYnOq<1>pw!{g9;E{l;!Pm0uUJ~8*#TPE9pG8fo+%0!^CSSUARDNhz zeES{aks0Hoi?5jp*sP=v?o{cgzsCKXkJhzb>$vqQgml&e6FHws4k{1!Nc{rF&#Z(L z#gKq9zmSR~BOB}@CVgW7yn*}8yJKb`kY+hBEK7q5`hV)l{tVUJ$07#Zp8nd+kP; z=$)*cIQlNTmyu|J^ypVTF2xL(NWBs1BS9C3`i3W`$|jiRy^=<+z7KST@e3N%M`dyK zJR#*N&r_bGf6|KooGP-{Khn=tf1^Ce_2c-=Pi(Za0M>1(!O=VuEYTgl^&m0)+A)%z zYt!K4Od1!uSaV1vECy;lNixqRS0VjeGgEebp<~9_Rt|Am776_{x-H1Yq7y@P4DTL_%^LKxANZ7*yj$zw`Bq*5BT_9iG&wKAKwn*ouBw{qXCu-U$a_|vE*;l+)5(#5F4oOK zFyQtz3np|HmR)+?NV$oc&H*!3`yPIG>Mxlc@_V_GQz&vlYHUR1eB4k-XJ4*#zs-*l zjc5UXg$Yy7vBPpao(dTt6ZityT1ItfF)*)5!Kp7ovu82;f8&~PMmxz^^v!%xAI8d$ zcmk$V5{lqFM2U#<;=&hN4Xb$4{Nf~A93X>5R&m{^mWLjIGXz_mKfV{K9*}u+-3J;% zdw$ro;WClsH5;=$^ES{#uFg-7TG5O)H-94j1o@`_L^9D~0vVRLaoLYWC^rHlBKwa7ijuo9+Av%k<36Uqay=JrzGi>rx^d zOplmj=+i7V!SldtMoWxSNWI@EQYZp>st(SiQ+eG&n}N5Z`MD*LB%KG*w$7ppuI3HyQic_SXQZP+4FXM)SGKu zHILE4Jklgt?I-oN^NIF#?;H2<;a|Bmpe>9#Lmt~gP*ML3a_)GGX zLtspiBQ7YRJLcYJPj~q<;JM-mI&?tPc4DkcD9Z^=cB^N-8Tz%6a-F`qsh^WRc`~#C*jgyXZK+@WgII zsg@G6{PxK@&%uWf}GFRC*ZXn2)MeTR}Cha&vvRIObR zNvXWkjc^^e?qKEM_pxYuuh~;0hYHZ*DgTZvrA@46l3KFl=~^ha3ihFirKE>La6;Iy z;CdJ!m*zN25QqsF! z(9RlU z5JtbaA&U%FYw+G^H-H?=n3Y6xMkrRzQDIjW#qb*CVyg;hf1DfQeePXh$}kkh7m8=F z@a)%mC+i*WV-@m!K=?G8ici~^H=r4D!TPp?-KBwC@?69l>oPlX6ZE(57LO~@S1t}E{DZTg;5IjnjA zcZN3{OY+JoLaRTjwe>4rSH{|5o+D99A@5%PNT`0d)ZvRrB(#XZYxs!X1hY8}AZ)9v|h( z-Gu9;J^G<=f*z`>wVm~pD1@@W^%k@1FCAFrn-MRXy^pj|t_)IXkbs-sg78N+k=#`A z=zMY`>(vlUuue9EwhathK%g4RfqA!TUVud!K_7tzONdCC8RuWe$YZ`c3X$mN(3C2#Kc2*4~2FO)kk8N zZ^SoU0mXn?ak06w2tC@w)olRrPKX+D`N(hg6n>s+E88w*$H(Qh?KsYAzY*m~o_JRB zjL7tWE>Z`M`yY6CpBKjas zhIl@@Q13*fz}dIgAC{<&K4I-6csK9$y;!L!p!O=ok<5bG)N@3>7^0-8EVsnWijT?z zT3;3OvAA3BvR4u71T_Qn6{agdiZ3fbj2zYHcepi?Zi&(;47>=j>n;kgpNc6~tWat7 zQ&jDp0mFO8IY-eU^a_=fOo0d8_cWfa*ktCNxO*><)ukj=rvx!%P)#l$4>QM0$MNMY z?01tsQIm89q#5!)#~dqvwqU6(-el(amA;^IU!Mn=>riHky}>Mn-Yp$r|3Ez53X$HI zfnI+mVn=FIG5ZIO_Ga`9intngvYo}gFOTdA+Mt@F_!hLGYfb-7NwH3m&uIJB^m{75 zE}PNDuAm!h?wtSU>91mt9mQvXmJ%9Y6_5FtI;pGXH>mGn*m%8N<6q*7cj2y|1Re(^ z%zcVp4}S+6OSx?BifpMSh00u;67kD(nIr2C(MxL!7M^yC3p1>|F%0TmHr{+MrkDpKECe#aASQuXhD1g%~j=+*$|e)kwJD|gfkqk3$eM-97^#WV4SkJqdC zd%KFk*TobrliWx2;aoK-^PLU~`u1CUWcW8=+P?KrG6H87oGU#%6KDceyC7F;K?KkH z`Q9S0a8Qqu0wmYJA`7VI;m-(Cr*U!8`xLYuTwCFLUY>DHAX@j^N&Ne?vu(Ur?9eXC z8)sX2`kvOgk4o|;{_y0vcZ=S2GsL3uzwM?ur@u&2VcJXws`R|*;jFv@he~wh2Me}E z(Pucl`K5`x)>Z$$Td1cYh$d|sFuONsYb}VOwGm!^b2AAtmAQOeN;LD;wv_u4`O2X0 zxCXOtRiME@E$AfP)j8d*gNRna>(uedwv*|lC4>T8%^HmhcYeZl6Z8Xo z(>-%CAx6yvmSUXeJmsVEC$>2W`~c`_PUWeXH_8476J_^Q#<#9{lat8#I9NRO%~_#l zOQFYEk)eLefsI{}_jy%Dt{k%u9OM(d;R6sH-uvA4_$+up7?|6xC(FjyeWsdTIru8` z89&^#J?P$@NIW<-O5gU%iL3Eal7T+x)b&}DMlLsqHv;mZeqUqN28q4$I zSkFF11GTOYjS6R9?{0;ZYhSVA0kgove2F_toryliOiY6q2&!fQ>-ALtH-vujrUz(z zclqINxd;!kf&w$F-6?xUWwSf2Xm~xoyL^AJT9f60l^73Ya@-Ly0$x;GrWf*ygrg^b|n~PeY~Kr zp5Z4whL`pZVUZw84BA*{V!HQVFa=dw6{>&oFr79%isCpaoryQIU6NzqobE7 z=1kB?A3+m#kB6IOAQO?2j+x2F^_VC^)wCkYB%F6^#w2y=C{Q3rTeW=s@`w1q2QAuO z93H;9l`d-mSDEtEDIz{`p_Y@h!{YnvvIpUhuV|C7EVcsI!+8PT$@+L=p^9EJVPJKi z5^hzQ9b}zyA_i;doM9m5QG|Ny`MdO#O+kKY3}FH1Vjg~@0c`iSo5jQN!IOm6)7)<- ziR4V?MX&+qU?-txcS>?2qJ<(hv?7t$&$sjJQ`Zy)Ni%^*dz*Bd*Doq#USintXY zEam`hbJRoNj+4MZJmj3rQmKezIc$U z0mJlk!nL+bVcbEN4X2D7y$&piYCt|tHv{>Pe5 
zCsD1$-GqelJyF{?JQS;225%pL&{~Qx6Zp-4-u1+B-USWJX}au5!H(2Y8WyD0R~5~!r}uQY((jP{61@wKq!6JFA-7*xu?~0WObR9ixI8-Lg?9FXfw)Jq-A4sA+%+K2p9JP3)FUwx=SGAH-8Nn zD!I(+^CF+1KeA=hrsxix=SW(k`mOdX{_}26uarmg)Dve3kG;T%fw1d&kxss}xgi4fj72uL6J3$N!3lN87o?NlC$6UJz93aFmvMV8HEM_#-w znm%<67!E)r4G=*8Uw!fa^2L8iicIDL`ad5czK-kB)Up2R@2~xpXpoWwV1Bx=?3;i8 z_FoqoC4Pj_qQB~pg&5YqFWpW@x030GFFWS-KOcS}*!c#uR(e!F`oE1C;p1yPedw*- zO15exTek6p(cvT_nrQEkqq{#^>$>IEAoYmkpZoNq@ScC`)#0}T0brhd8IR_GHOx&_ z^!T+2_wDx6@N&Tk#ASIrF`aMRulH+&i<0lQpH@9Civ#i+quhuANDw#;&EEX04U#FI$v-U>CmHKrXuU|7EF z7Esf79&~F2xQXM(Jr4D-yc7*NDu-WTYl6!+b~j;?5lF4cPQFg=mCje23HgnSw_e<7 z2sx&J5oq9~hA!tZu=pENLyn-600g>JbR8D!v&n8)Fa=0^|2_vKu;R+w(*ssU-t0G^ z-&_kb26Yub)ArJ+)`77}BY`S_w8!p5#=Wx(U=a7-s<@&XRreV!1x51r!?1gTdO)+P zP!&ilOd=8*B>*Y?PII5(3Mlr;sYvM$2wX{Vkm-TsAsh|YvdaPC2n|}ml2X0g6Hva< zoYr^KcLDP5FKNn78YF6mFqKGsflhLzHK{lFdIS6cpx;UX0MR^e7x2BY4@gHyFreOwWE{qoJyMPWrn zg@$1RdRc3ubP);N3CC(wk_F0h^(`Lkt4(g72I0+Eb@Mm5XQr564k36vd`{cS(d5ir zn1HyQV$G7r9&^(igTzm>K9|%?6_&P~ z&wU282yNA!#sFn5N>6jn2KqG4aqB;TagsVhI=;kQR!&YAc9pzR@Ie8Lq!$nNnPdbW z)X7L_m~J@~oiZn>y3FTpZxWcE4>nap{9ZLsoOL{(%g$5z(AfbuU|%9`e2oege1^s@ zuwXCU^`qjV^`d2xCB-`W*Qf)aN1NYn4iv7+yZ&sg9b}{H9PAcZI*yRN86DV1JGgL~ z4$}uz!JQtzuE@7E_j9cvrxXBQ?BBMfP3jMCC@Yk!o>vc83nylhrYJlB`7`;&MZgbF zrz#=#;}0IxjJzcG)n6Ld2i~l(nk?~rX{;O z@elbWKmw_DD4{O_{u-lcqiN}Ss!rn)`-<55ZnDff?sV6Z*h@;{Ciejp=~|J(4>nG^ z;eJ}7N~bz6$%1=45A?RqA&BQpy5 zK%RjJc|A0s11qevqWYt#%F!{VY!3k6&({9IkY5M#bS2G*igr=tC??gU#Ovdk0%lUT z@&XPT&s=_{o^#;5yX7v}du)$ea-Cx$TlOL27a-^>Km+}t7}&U~)B`AxE%6KO{|Mu6 z8kf}4Op;UEyjs;W%)p**OTZdO*L95A0LA2|>Ls=L+F+)?>y zhHHn0?fch{FL~llew>dnzA_yA8qpKL5RT{aiO<%wn-;u55E9ON?fV^~St+j>Eg@R-*OGhAEd?il25z(;-h2r_)8%!;AabY^x^?6;J7?)vIV@2xE zA<$YzRNtMcTJ+tD$;L(O5LoY84MHid!en?3Op=SJ-$AYS$|0@ieg%`xwVHJGA#}ZAR4kzBReo_MW0`vCnPm`l}5vOSo`7xe9@VNXl^jwt|YB4CYon zb)`GTx9e0XU5a-tN~v;RvUmW0-6P^WXuv% zC3)A5rh+J`I1$)FevELzcUX|Vo=1++M8zS`q?dt`b#a6%Y3{xjcf6mJ^{~H_b+^=} zZ`$dGE}G(uzmsgwJ2~pOIq2#uRPAa?t0^6?5b~biTq^H<^_L{d+hw(+INV>&eD&J- z<1ZDg2K zei$Jno#jJsXkAJjaf3Ie1@VMvkF8QAQu9&6AHOGl21||Rx}~_sFbsR>*8ByXYFuTC z^s@a4lHTb-xBc!?x;AlSvtbo2-5Mr(OMdVXTVT4n$cj*0`Cj`zy0xD+6C)b03(wJC z`Qbz%o=W6&Tl@uUFfHUg`KhrXtn4^^XN5&doK&RV`};@@%Gw+fE-#;mN`vjpfGcKZ zL60)dwc~u$2eEMG>2b*Z4&RwmMjFnMsvK%NyZbwJIeYYyFEN;bf(JZ<5pBc&VOB@&1;&H)UGj%s1 zxpi+yW@@eTD(k_=S|pr)553Hv36aD%f`1%uEmJ5dqPoj|MTH^>3mP;=Sr10a^trLe z4AR*^+1pa79`SqS;{;B%Su)jz+ z9*pf22Ez&N@aIyAZZ9%#n^WkXlIP-uwzG#g*uI)m6^Xh0Xu~DW&O$KvxogNl=R|kz z!<=tqNM>D$t%S&BJSG*WnqBmIY)m20jnr|is~z)KOd32*(*SHnOoY{-WoTIn)^h{# z#WR}&u2ZLGGYlj;EQ)d`?o9m3AtJZ(!FP!*b>- z#~7EHdF_$i;5AZ3X0}r5>(oR)__&~Tj4IbaUQ=X-6+G+ia-^@;s{;#^vqz_je{2S^ z^xa!^hFAy;l3`3N&;!xepK=2;&Q&__?-7Yb{FSY*6Lc$^4p#xB28pD~i6vI-sT;U% zeS%yx7qM=GGm_r}pTsmc^^i;LEC8x4a~#(*J>v0jlM-#moz1o_x-cTn8%`<(}Ey`!B{O z5U?s*n8Fpg%-6cf@N?$W@6`OL2^+xNIPPqL%zl-<9$bl%I3N_=68BTBd>Lh~^&zn3 z@&-Ql0}dHrdQ>%~1JAECyj>$@on`$Jr`I}3Bnw%h52A`Dt+{{cQp;myH7LS~3NYZ& zx{N^8gISRMVgn-gHL};m{+oYj#`{Vu3v82k+j-$T&p`L(NZW0;o-(4*#$uIIva1VNR5j_uLf8Gv%ays1!rvoGDA!TzBd;8d6|k)K6F(lxo`&Of zg|@RU{ZWo+fJM=9V>NcWs>9V9O^DL*Y1MRulYHf^{(eQ zw&EVEqd5~`StEM-_HgQLQNC9EZIBd6PCFqvu+3>kPP1+|gFdnB4EUS(=e zaywRTD9hwz!TpqRV&F?|xet0fj71yalN7-{bQWhBzwN+jG%oA9MhZ^;KHm3;6+hbgK# zg>OjN#gk7esYgUhEFUya%7lKI*qPO}VvtK2Ut5!Blzxj5@*<{3H`cRD*bQG0ywY`1 z*eOw8>KF>)|H-{p;sbD`Z3KI#1gI?eK$Da6O+mpN>u)Zd?%lk7u0Z-&v<2`p-V`i@ze)yl?sZ5 zHCtq;x`e!0oYdnK~V0*qCOJqzcnOm=Xs7!(2SH9>i4*x2O!urjLE7Y%%8HP=M`56MrurW>D z=sc^T-G#9WUw-lmEZL_eGm%EE?=v|4h-amoi0NF@~Gaw#(361WH=;0KklI_-ySw=jX?* zty32!fs6)xm(vccxToSX^m4k`eg37&K42hH*5*A9irl{BCgYvB0raW`SI?s&AxxZ_;BArR zg3p#s0FN@Sapmm(qOEPtIFwVz 
z2xOa+yY~*sF%?t>*<_!1UWH&E70{^4^yiD@9(oN~0VudLs)kKod}}}lCf$S*mm-_N z@i)!))ty3pFMVg{_J4Ouwi8olQdsc@SLi2a7r43?+8YHB* zM(+1V#*1MoOQ(6xVrz@|w`|-NRWJUa&|V@LW6{RLc~km}InJ~iF;p%;)Xeu8{s_8< zd0YkxG}E{2+`0K&xb+Pq>Wxc$)PK;15v&K;o)6sn@|uJ!qj8^B8jv+5p$%hEHnj4G z>rc1{lgh0shv(s zXB)c9*BUw+?^;oZLS1=LQsVIje?>=vryVF?FrzkJO)snRDl62m|EszHLA*uarK8C@ z=8ZMp@h@A79kyU&Dk29jJi~QQBoP~1X5d5`06+ePfAMHx9?6i!^ZaUy`vB-LmO%xNk6oqL`$FwKKe?yKmn}%Fpm_ ztk^DE6KT0mH(7s+06~2lWHaY4sJ7=w>Hrl7#5`4nen}7*L@{{!7Nas|L$k)}(oX9oZeMCSuM*{W3A29YSS0F-)n z0k&(Rh7Uq;;PVxHb7H)Ljm?CwJ{8b_FSsIR*Qh=ydz`Y6_n+2zgitjyQtNXusC@yg z*D^#Q|9m4tz5hMHkTy62AdLLR>)oOvgaK7aa|SiCf!{U&vNluD(EQ2oZ;sM734d7# zX!3kcbD2sK;Gf0+PY;Y75hG4zfWqC<$CC0MFnvrL9RZf});Rz%C_-Rz|H3_icYw&W z(M%K|8BYQ<%t>X_sew-qfOXwZ0Q7BBm~8_jA>Nyqm{1=uX148QhZaUfMJd1zevYF3 zXJp&M5d2?0T$mBt0sy!b#;6FhqLl*Y02N)3A>g#|E%VaK@J2@m*&YB!H8>vO6IcVp zdI_bbjViq_bXuoJop||1v70DIfQZ?WMnr17?1smOw(0n!U4TYWuLDrpCzMVACytoH z1wh&vfk7vaTUXF(q@Nl~@Vk9ZX{B=c{caKgPF3S<0POttAMF0~i1^BcwbxsEGvN1Z zxO0f@d|E&NK@m!WkXlpYDK}B`AoY86N^2gP0VFk#vq?=_gi24$0wCX~M&r`O>y+Hz zdNpJh!APlyskW%~>bvRa2@t;4%n3c*o~YkPGb`$V7YRoIp8Ta*f<8ZuBClFY&6JMS zZKgjiMH{e7=}ZCwyAQw3IU3}jS5!0FR2(OoL|FFqdDny=^nRj+0x-Jg?McSNt>D31 zblD0jQz{Xq6H?!6_ptE3f1~;BgN!^rWnV%UD#>Min`v{r3!{A>t5!RnnAo-THZ|pk4DTILDqkN$%QpBGj z07y-32Z-Pq>2wj|rkHbBI)HD8$4}y4F@e`u9x+C@qqxv}fX$!e?B5AkY)C}t=UoLM ztlw~7G`7JmXm+O)2MGm3qiC!ft zLYuMQ61p-0I!J^lyf=0@f#?WMWfgt_jOF_&*f!!Y%FtV^l|tp~NDQ@hLDWkU{B$df z-j7@njFT(Z>k6}Zqq=EDTtqIYFGjntrAK7b2|e%v6+6PP{o;9a3yyZWa8S{8v}P$C2>2UQ5myEuli zb(`OIE0qQ(K*cYicp8X33NR_t{Qy8;Zf7lrUfgIY(Xd)EfDtr~ZPbifi>NcHhgYEJaif@`ZCl&+Nd%!#5oNa0-Pwu)R@1+g1KLo^pzgdLA zAw&0;MJqgEvk(Yk>eb^~;a911V{)`<;(R3BRk=yJ@$c}pXaP-5KAKyV1U@9lWi~(*zgL zc_%&!d={Cotk!r-PZv{uCU_;98Jn4xuRlZmsNUcgA^N>qBKbUL?F>W78S_sw1!)Lw z@i)6Yjpod5CGJODR=!aI+K2swsI|cI=|!9&t|-OD-Hs^fung3y>Z*1FQb)Nijm49> zLj~ie4+^CjuI>)QkL)DmwY&d*;|mc2KZTv%OEpU0=UyY6P9{k4Pqi!bw&twLlppfC z8x7)#jx@V&hqwpmF;!lsC{l~lcQ2o*)sD$h=F%mqdg>!QWe1)>xU8L>U)0DP-2hxf z4z!LPAg5uYj(=C`*1~S*tph{B6{MThLhtw-TGNfMTmEjocl&1Ic}LuTmro%nqZ%R0 z+!lh?x6A44Vu3d`LWUv(`)#Ok z;^JS$S^wUgIcyQp%%5Yo!~Zk`RH{D%qKT$wfwO5q`P^R-AEKo-fKYb$|La9HGWPzo z?S3e`KD^u;(HwSZvK>M6?=8b$3FHR^P2u_c&42Fr@Av~TB!7o9{|Jx%?;n3p?*9ox z#qn8T#by=>iOmS11%PVM_|x-nn9_Gv%BbY+jSpY3N6ToP+0ne8A?Cf!QRXQ6Sd;{o z0qF90Wf?6cOp+T_5LAo1RS#xMJTd0_6#j|Hu4o|xn{pxWU-4tHF>1W*&AJ#F`6ofG zN=8!-`?WPsm!lKt>E7&$qJNNIzI5lit6HOvxd(Sqb%ypTofSXeQO!mX6O|udSh*F~ z+GxkiIa;Pk7}#+~9DIrwcnV9(ZZ0Xo{2)xnMp*uYLfLJB#s6R&NfmA_G;N-u z5s5zhxRuc=xB|lA{&SczqGX?<-~|2zyx8xgoj*^|-t%WHeN&7P2y6|W3ppqv*b`rn zhA-n+^!*!nUJc&wQ$tLS8lmc0HreAf&>e2e_1;TY`-y;>*tD{n`GAi%)7@ee^PAA= z$s7Ork9p~oi6?$S;!5h$Urs(gG!{mrY@Pw4-kA16`4ZX!m0;@;TyZvcnQ}{4{1rVR zfkdYur(RRkys$4^qv{$f-()jUkZB`t4PVAOMEU)}UdnhqI>au3eST42p6Uc^Fyi_q z@puavtagMB#X$FA26zaj?2NQX!Y|i8NdJI@=+uY=_7pwFFbc$I}0r;zT}= zpPaTkhtjh4VGG}bEz#R)L5ymUSJrEPqTX#+(GsQBzw8>nkEDcvb&mY;R{c%@w^zQ; z@4Y;MZ%u^4@O5*+b8-{=U%0`%nPt+@Q7`D}x{&K<9roI=N2uLYJ~P1_jmbQ6hxR;< zl<5AvHj$Gz?8YM0bE0e$64w4Sm|KaS-_rcbvnbQmVHwH22S`= zT5SiRY!|sZnaxZa@Cvqh6vJ>L7nnegh)K5tYL@i2@cLAr9i9xi4LzRnx!?lJ?xJuzZLL~KOBQ2ydt)ZpZ8ar)f|Me+u+#n zsjJv0Xwr<({myN3SP{dMu|Foz=&|$hUPiJV8LO{fQdukaB`Cwv`TS%Q8R-}M_o{Pk z`i%3r9Md*}_^zUKo{fkw!_!AhN0pZXWN4o2QN8{VCF#oHZkc9o2+_)MQoH{YFGOMq zPnZN-&vJvG!DXu;=Q7pmb0hckgg@(2ew}Bn=v^>Y_$bDc_LVvt$8Mb|7^ZH1{1JSv z*!11v)~zTYzB|4-wP*J^SGE9&S#%R4TqwRV%5>yOR*zrJ2uX`_eH!7FkT3bRwn4%R zFF`s%5OS2-IjpxySnDi^fis(P(<=1V0BDPik`sQiXjZ}4014plnldVcbg&7>AM@7Ad4$Divf3A>Qr?JcU&1X7_82x zdMt-hYzII5^{d8_{_*K4@(mMk$&w z4&m=3E;v1{@|C}d!e#t!&0NsWC-C6F%AJ7D=gCh|e48Z?ECK9?bMmxggEH^Idbf03 
z&KKx0+`6zsYM1m4lRCzOr_9lmp{F0Ar)Bojbx@?Gbd#Sn~Eml0BV#=vQ zg0*Q#W;yrcu-`=FDbBk*h;^pjz^>@roS+s{!#lGV)=*x%z1`{~-qk$tAnC z!=i*AYG=OThI}=Jp0pdSN=XE4zs+Ipapg#KYSevjT->30(#o&p@d=v(OM^!Dbcwdv z@W>PbtNflXekb}o@thn5*4^ei#Hqccl~W)mz8S^}-=HOdI0F0NMpPgfB{d4EtViUE zm*3$NSzKx`4^5NJS34$JT`4k{d)B1{O4<8VEVdJ#TlL|Z!S`{C}Hs8f! zkeJkPq?kc_x4|2c|9x=GOL<$FI3{p4QQylvo9SFxrh3=`;`?frKUd7*< z6;jPd2}j;$8I65n>K!Z)jthLY*lVXc4Q^Sq4{EU5P&aIiY z+1%TE`?QShaMMauVnFJ2gcpBY!#sVZ)s>I&0eJo7MQxR{_gdROxvEQ;Lp$J z!q##OhLxR3gSjr^>)jQs6cJxHMhf|YDn+kZ7TUdpUr@zOlDNjdHqdT0jF;2lx7XVz z859iuIzQPT%n7H+C)&Hsnkh{4z5y-P$>^^_&*N7p{e@(+n(;JC2H&*x(hwHAVkyb= z$oN4Z@z-+85x7gtTCdZ0@uricwU<7n^`ILv&4yP&c<3G~jI1dh$1ku(O@ZH}iTRy0 zR*3ipGFUKOZWx`0w4=2Q#I7FP%F)rbU)hy#>d9m3k#vizlR?xg4F7DYMAxWg2QfNW z@Q}ij=&Xra@Ui}E(2pOMCV8dN4nD=lSynN}TkAMF)7fgB@vi(1!uyjDAGDGOovBwp zDuwFdR6J>FrM~?EXxg^yG%Br(ad=6`51&-+Yry?9KATCje|6~V;UY?^V6u^1AP+zchZ&45ccY={@%66v4PqG{UsQ zZ`S1TU{RO&MYQ;|jr%tYLX|KLw(fU|7L8!V41*ub^wjxBK3-ySZ&{&Q9xyh1-L#A? zu18*>FW|oJLA!{G%iuh*d=SfvqIdz8aaI<*G1CG zciCt(Y}wy5_U+l%>BmdV?gJ~UM*A;0c~AxWlw^(g@h^>^>GLXd?U}*_D^t@4-13s{ zKfI&e#&i%*Rn7LbcW((M4_vp(x-o7tDd*Axob37ag3F&!3rs>@rS*h$-9rU0#r)a` zM2X(78SHMXMa6WR)p~^-Ept%>DI&A~oaa3qd=r)4X7sqgj{QQzKrn8&ZqS&Th=UUP zy|$XRuk`ByDf(fm!eT;he#-vKyviyCt{&@WI8hw@cLEaRac54mi_*AUA*~}%3tGl; zPm5@k50tm54$PbOVJy9xD<5sS;CgVLW9ch*w{0W;}CNCxcpT2RNgL4tYFBM-pfSaM8O)%Rc1$w)BQo%gp1lvh^rA(RCE5h zF6HUO)Jv%0-ne{(P|x5uCD#&;Mb3iOSoq${_r~VuIl>Qn6?Ym(Vo*J!3%ox;TsT>2 z{te%0Xa-YYs%ViIBdA$Lln%#%Q`!xdH$%Ky1k65(ajn9NF&ul?a{a@2!xm9}{*Fm; zLh28>v@^PMzOrX1VTvBjgNfbVrB*+sBKI?QEVQrUqRgk9f>sv&g&yi~48$&fu7vtI zEpGRCx4DZGSGMc~DACnL?U(qowsF@_yZ6b??Qbbst`{10?@q0l>>cn|=#wnq!Rm8| zB3*vkTcr7@TR%J_({p=FgPX?*>Hh;275m)5S2;l#=5LIR9)@OeCduQ9i;+TtkIbzt zSv&U|DrW?q>SC-q6^~jHo!2C^z$hUNJ0T>HR0^~$3vg)P(K9&FHBo$9rEM=@Awjj7 zk1j3PiHA184|kknat*W^J{fqy@38s>7jH>4SL5EEUX*FxM^JIZ75=5zH%s10>T9_h zzrr;%5_<-oOrEjmXGn^3_i2;mF2d9@B1*sN&Q_tO#td__-svL;tHe}9aEIq--z+tr zbC)Qt(46)8ckS!e-D%###GGZ6I-pu+^NgPZJJUa)LajH`X4X@Wmqg}4A-A0_OWhUv z1}DlDn}T!9mvK-&nVkT#ZK}4_nPp+JM%zvEDQ}^NP4@FhXYboDhf+`mQ~LDZJnP^O zM?+95xkIdb_wpU(C?rL)HFpC8r-*HeC;{SrmqYT$WU8OeR&t?ksLLb^p|&MxJA3NT z0jqBBt8r3>A$K~Iw5QFYWDES*e-eIAHNJavlQ4Z7F;mFtY)cA}J*t65qG#YYo7lHn zoA}QhzV38^dR3TJ`Z}N_*t%rT0ElXPCgT?;sxt6TIg21Gp$)mu+EiG>AB5c>t;c&j zTzQC)=mMJ}lc!5g!*|Ek?;K`8m?2+o8r0VtjNKapjg3U3C4`tBO+Yt3N(-zR(=95` z=W3gcNB31y#>iPzo??l_qe;YW#!R7o795}rdGUg(_&iol+;S5y=H0|6>$v4ux!C|H z2t5)_F&;Zq9{i&}xWqKU49plsQwDxEKx@+r;`z=&%DwAIufF6EMY6d*X3Fl;NI+@| z^I{qYVPp`e(rW&wz^jPchea*1l4)-`=!M8KWI?X&ZS{}drafp`UtleXj;~nw_jz(?`S!m}le?EcQX)LaChbnw z^|QnA7S_6&meu*?zI{ctx4{W;tuoqig}JmUJ5HLkm%K62o}ek(ehm}f>HYpLt%-4n z_sc=Q2?zZtt6gGPn8(%l{O8$l6L#wA3PqmljA6J?0^Mzj>0#t)05tVSH8U7q;Nu)$ z$1y${4;{hy;eQTvlauT=N8rLRYEiZtB*fD(Ra7bJocymaiz8foKnUk-=fR#uB)V@6 z=Ww`x)a}htrLY}GEdv)S9vUO#ma&^&t;AEyPfS|RJ58T{o%m5|THPW5FE1dnAw-Kl zsMb&Zz{1(U(PRSOcox;EA!g%nTFngvB>l3xgL$Igo+WTR&mNP1*2a2xT1+KmEu!Ri zrg1jLOc9EDH4sbiKOq+TiE3Cg3rD@XLno7QbGKivKW7h?78`f8eu#LSjl*%w(pV#Z z4_#02X{8rGFr~0d@^8&*7N)s!MP~QZQ6pVXsi)E17;CEjcQSzyis<*IlY1LAo&Ok$ zey)Hw5Aq)wf(J4@;pGB*8w8yQgW|%slAge#dSmwO-?hr^n}qh)P=|^lGb0X0{Mm!# zbA&ni$uLckxg|m~qeS(5lN1xnG9`S`6B9+@Sw~1{AVXFqf!Xn2XjYHk|b^X2dCGMk$qZH4rIQ*^2L0v8O~be^`VzcJ?>2BxdPKCkPTY$g>XTE ze*T$hGN*{*F=gL@Ej;9+AzF3oJ|wy?Rr5dp^lVmm z7ca!{O$lUSh_}`Q5Ri|U-uAN$^^S_<5bmAlNc1-L0TC`;IiQPwHtaz@6Tl@w-vmp} zpStZC1y`(HTIdp1qSEaAD4S7C8P|`X6n1-zu6V6D?}omeUTJq20LKt*uCO=cGlVaE zTQPV|J%=&f<1a)DrAejRj}>F>ltAFqz6O{$LG32cIllwMD)lHcxs7_771Zy(8un57PY-E>M+A(ohV`&Y-UUToBEM8k|da6xpuKD 
z_v5fhdL(Wyc_X^qw3+GF8u|U;U-eJE>6qo>Bc`ZZjL6a-pW;-Ue{<2Lk4?uz|dSVS%!B@-W?xSuc6uJ|K1Y{vhg3wMV5S1}vHk-;;YlQ#^?FsRt`wf59_5;x%bQ%&HYJ<9q zPu6ZYtz}CW=Bo_t;S*X%_v-t|%d4$|m)$Gjc|SWR|4JFC%Nm`rPCLsm@+4wX5ihQ- z-L4?-(MWCH*9_er6z{k*Z+#lT8Jga!O%FK;@}oYQ`oeKEWYOVm?vejyPqr&kwFZv# zTbIB~OYOv|B0x~Hee!n%lDIV>G^GZ(2;9xQ)gp4CTfog+`!j`DeoIg1#f-Y8qJK|Fs{lC`@_=36njQRYQ{=~Hm!Fn zWt9=9F(z48a1NDt&5l#k?shLSNMf5~^BpiK_%X|@DBw8eI;`!%DTAQcw?!rRudZ@j zTA%b0A_jqgH`UHVGZue;$7er>MI@{+WE2U<7P#Y>e%y%t!w&nQk7s?P`6`2|fGS*M z8N#fUccl2BV@1 z*}Am_#zeQ_%8=>X(|20nZ4ex$htjFRjA7R(4`R(2O>uFrdcjR+wyBLHH0CnnDfhMzt0{2guS{fJi**2het6WWhAS-yG;I9)pFp|od|Lbg z1_5Y_t2=J*??cmlp!DL=0*NNbfob^EyqR?o9M;5qkLDC5J{*QD+G zAKEl9Gs;wz@4{A|ta#(#O1mj6+@}Z%9g0_|_~J&Sx#ZS+oIJ<-Uj3;d`NdK(+GWlrNwFj==EDSJwLogJ-_ojDZ9fTyEIqv|nh%vazEyDllE6aO;PnK0@&bh_JfXDK-*Jrc^ zk>Tf3bBg^8$+t7e<-46OP4*A3HXWLuEG=97mWHXeV=YBP7iF1hLzsTno-zMO20T> z(TcJYwT)v6{#Cpb2i70mv{i=m#0rWNZa_;7rW5DHaeZ%8K0U7yKIG^{t(fjM_YEGl z^59;L%3x*3S(u4oGB?5byQ96@up$;4%q&z7BUR*GmiJ*R4{9HH>uOGPbg2c~vG2=7 zt=6+8#h=rn-V(P#7PmY}VSd7<^lt#U*CpkKu-;xOI}twuxGHGITLdGyk}dH{waHd;b&-%=4(fZYf^? zfQ1Y?3q2a^&br0cLDG~Dl|Ri1GBaG!+;a66W&I^CaJf@bY71A(uf@Ya`hx9{}ET>2y!o9Zkb`$(6S0An+2&%lO%vXK%+elCN zwwG+qF7zmJ(kKA$U9;gNA?4(E}}m9vb-k8TZ= zO6O-YJKQz8HDG&Gg1hCAo&7Z78a!FzrEMJ!;QMckZ3FyTw}M@zOY!^=QVk7OAs_l}Sw|AwOrl8x97bP@H_Tspj`eR$I|7*vgJQe7h=9;f(vNi&m%fJN8X zlB&^ikUiJ#-xzu&ow8vWFg{IA_B@)M`TLo zE64T@4Z?3rz>^5LJcGDTt+w+IEj^k@Yb8M`(BrL=4YpS29L=d0kE!-8&yWbZldO2> zL4OR{gpD2Ol_NS&o_E?35<}amUUZ&i6LM{8= zmv}rkdGXcW3$=&^F{GV4*5$pCNL>^2aS6tJqgO8_T2f7)ad^stk5X3B{pq>Ze%N>~GlnbOBqpGM&q^(JWUuQ`wLJgue3Z(R$6L(3n8Uj77lnv8 zyyj-M{xKG0U6#SuTPj#Q){;P3+{;D^Y@3P_j-HmohhZ2|baEj%=5X$PDlL(rq)T|& z)!Q0B*IYp=yfES-@#J-pVFAWECa9S9aC90LM-Niec&g#U6 zPD>r=B?36RKKck6(Jgv3#j}MEH^9q=5Dt-M73=HfEWX){_g086#;3eV)cgh0vR|yN zUnVZ@4^Jy~-B?hg+!+JUxADe#yj=tz4B+Fi8@xac+6PCG#8_o+w|RJ+s;Ya`#GClm z--#W>Fekv=Q9=AJ?4y#v-SK(KTeM9-;}qSlH_v#COl(R8$8xt$2cU}U%Tz?KPBFAc z-z`+&r<*M4C;LG;r;;s(gyn%e)M#A6J-YT7cHU|xuhP-dEvkOsG%JX@Ij4XH31NF^ z6OYx`p}9wD2(in6^b-K(H^;1JY4^dlee-Mo=&H;r_nOheMnlZr?cX*|1QZ@;uYn3X zio8LNL=nv1m7Z885s65N2ORUi3kLrNt7(lXLF2Woa;((%~^LG5swxN zb-XMG0;|@b%r8kY8=OqoKZhoquqoISm>Lza^S6rKOh(4`e~yxfa9m3`GF+-+lJHzs?^GTKM5MDxBS5FvB)R^)UO6)xR81 z0%+`J(7qvBCA&UO@bn@ZUy@}-zfp=?{TYLttoM@TOo4+|pL@Guppyw{uU*X*PwP82 zE}D~@!2)g>vtJ?qXgszEdABBms3u%jBDL@$u_a$EBZsAAHY02uw4xzb&81&o*ts+j zgJengd1hHy<@CP7Fj~MheP813vht*TokTEVLKLx&LvAui{1ZKbq`FOmgPpS4{QD;T z<5!^{Laq2$IqaqumlpJ7=M@opOIuu@tOu0Z_CHE?I>qpJ-vLpbm9~l%ewO=R-Sif# zeYiVmYtNuFhpc260Pdo`Mi)NsO6A5XaAv0H@Mk8!HyE+&va_;Is+MpwDvK_DN4cGSH>o~dL&)`pn2mh3t|Jx?>9HI@P* zQFB47mNa36$(*FNRL19LLgtCkWrk(uZrU)~*AGh7uKHMx zH`J~3+NA6X_)SlJ14W-DqTjE|;0x;?el5~Gelj@sFS#17S&lERc~-~2(4BA;fAS-{ zT~8u<$7B5n1%DALp5)bLDuMw+=f1q|mY;}eWu#FPTE*>7u8ZvZ3Kk~vDy6BEVH>(fPtxN(fD`ja0)fCC1)?5^mYdltfk}E!^nnT{DMm!4%!aShIBNS>Kn7N}-!E zI_xP-I+U$cXvEWzN41`xpQ5Fg&w8ta*JTdX{ygLKP$6-rtewT4In=JnuSYa*l4U8! 
zv!DP6F-1-sb-|UBE{b8>8+Now>`52H=Kh8WT|%E(Leb%y3@RT&P2AU9UXX(BM!(98 zZ8GU^VHnhRD!sHqHeIS6XyIz>grVNxB`788;#-+XEElb|b)%Hn zu3N09&vSp06ylahA}`JfcdZ@@W*v??8^Fv?71!T~Le{h?Vaj%j5Oj9E@`IJ?-%vzD z;8T`%M`2T5Z&}_Ra;|#n-vW#MhoyCeUN>Ep9?Lwdi%v=SB@Y1yuhdG^@b2Y8>}^!{ zP1%Jaoabb)4lD3GoT~anPD5KiCd_+*4v_})F?cGx>PbtjETEmkZqcAbW}QuliWO>< z!$lrhTmvI(Z5oad&a_n&=ZwIFdf8L2kz^l4-1$~eSfa@p4U)f@>LjWC`}3~2H$_Nc z1fa{IOB_TFjJ|kS;_Q_}@Z)dO^^<1S+*eE@jE$Inomep7Os_v1x_>t@t?84xG2O@Wzyw>rV?NTT!YG(yS#*`;9 z%Z*j?*h;aqU$OsE@p0i-Nf58F2onK1>@z$L4M!yn5n-I^A4NC>zY0M-m9p4M8UJ)D zAWGg_Hd6v0qu$_xN8i7F15V8dhU%-d(4GXKM$$*XZoYGrhbs{#zR#d~F|$eH)lIR} zR%yT3>?`061`YRH9BtzhigZJ6N>FzgrAng}2?k%Lf^K`MklJF+r#T`4NN9xeIJQGI z(CY3+PHSrL*dvGYfE(G24sIfY61C1F$jYmmi={guYVAAmG5(KjY9VNR; z;E=0jMED2sVWr56hc21q0LfwvDg!Uh6yN*yFd#Wo*zN6t)MI3{^qRx{??eNgAy=<{ z1SN(cQ@6$1``m{r_1I6hv2s0Lq$Uy7PkoKCjdER%4#lRtGa+(VffYhX7fBBLji@V1 zUQQFZ!$hj(^5fh56HCIv`dg9&K31$!2cGw(hDGX-@(`;sf9{MeTAgRgH6at8UZAAG zi@5w~Besj^r4sSCK~_*yLxT|B@@5aleH@U8x*GpPCnO=SbN;P$&E zvXnJKgq`(}Lksn_d1SiBR4tOC{28d}^1(?es;lGGeY!AcpI#zxAlRFF-0dzpnZ~-H z?l>sA2ng@rIPPn6J|{YLJQK1EEzZ;5{SjX7FS?kbmSkmFZgp(zzQ(!VY*LC2(I1fz z5>=7f9keY!^Qot65d=Fl>k3%VN{y=RaU~XNclY1FV%nq@_X-Vw~qa>L|pS%@{VZWc?mv|7m#x(R13-`2=I z8G(%L=o-hS7iB=v{%LtNoh(kkP%faU7$$bWA9 z#ojH*^S=;W`*TLABaao`_*s^qhJIPN_U9JFKV03PxQ0n{H28l(|3CH?M0qAA;*ZGM poyJq}{_o=7JpcEzxns5?;AHjd?p=G@(=(^T*uY%B=C(`h{{Xk%rWF7H literal 0 HcmV?d00001 diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.plantuml new file mode 100644 index 0000000000..31515a9e1b --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.plantuml @@ -0,0 +1,58 @@ +@startuml +title Figure 3: User creates a machine with kubeadm bootstrapper +actor User + +' -- GROUPS START --- + +box #lightgreen +participant "API Server" +end box + +box #lightblue +participant "Cluster API Machine Controller" +end box + +' -- GROUPS END --- + +User->"API Server":kubectl apply -f machine.yaml +"API Server"-->>"Cluster API Machine Controller": New Machine + +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Machine Controller Reconcile +activate "Cluster API Machine Controller" + + +note over "Cluster API Machine Controller": - ✅ Machine.Status.Phase is empty\n- ✅ Machine.Spec.Bootstrap.Data is \n- ✅ Machine.Spec.Bootstrap.ConfigRef is populated\n- ✅ Machine.Spec.Bootstrap.ConfigRef -> Status.Ready is false + +opt Required only if the object hasn't been seen before +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Add watcher for \nMachine.Spec.Bootstrap.ConfigRef.Kind objects +end + +opt Required only if the object hasn't been seen before +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Add watcher for \nMachine.Spec.InfrastructureRef.Kind objects +end + +opt Required only if the object hasn't been seen before +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Add watcher for \nMachine.Spec.Bootstrap.ConfigRef.Kind objects +end + +opt Required only if the object doesn't have a Machine owner reference +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Set Machine.Spec.Bootstrap.ConfigRef -> OwnerReferences[0] to Machine +"Cluster API Machine Controller"->"API Server": Update KubeadmBootstrapConfig +"Cluster API Machine Controller"<<--"API Server": Response +end + +opt Required only if the object doesn't have a Machine owner reference 
+"Cluster API Machine Controller"-> "Cluster API Machine Controller":Set Machine.Spec.InfrastructureRef -> OwnerReferences[0] to Machine +"Cluster API Machine Controller"->"API Server": Update AWSInfrastructureConfig +"Cluster API Machine Controller"<<--"API Server": Response +end + +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Set Machine.Status.Phase = "Pending" + +"Cluster API Machine Controller"->"API Server": Update Machine Status +"Cluster API Machine Controller"<<--"API Server": Response + +deactivate "Cluster API Machine Controller" + +hide footbox +@enduml diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure3.png new file mode 100644 index 0000000000000000000000000000000000000000..148ffd14c4888b94713a8f02e72fa19146643f5c GIT binary patch literal 68668 zcmbq)by(Ef_bw(T98p0UkxuCbQE;TYOF%%nh8kiNL_}a{krszek&Xco0cnSBq=$x~ z`|d%{Iex$Qcc14z_x_4%8HQdSIDm5;o)7Ec_O8Xhj(rQ{Hwck z7Q92{CK>>KFhixap{Di@9yVs?P&|m4otcv{)a)6Ji3iPdDAYlilheV**beGqYr|n` zZ+qju5CtCI*=0+0ZRp?cPLX8hP7vc&EQhyq9YCUk6KLsmYi^Wm{ZQJ>~z&|dI@)n*#sU7(QP})E)}7!+mdg2 zzow8y5lm>DKRQcEM?i0UhUhUjDT|*&+Ve{>X2IN)k~ZwOr=KMub1ub(xpY^*UDL8D znDE#1yL+ADW7eC5LiLYMmX|cQg&{v42|O-Sz42U?!_`}YwU1u>iPgeqn%SAiVS(Vd zzOW))2{!c`NwPo6gKl}uUbcMN0y`_o-+x;%)&A?~0L^n=;xmues0wEBshm_E{?KNk zg&kh2e6JnJ6w=L+!xfKxWxm_0O;D0+!NqZYZR((GPi5eqjM)v>*PkCKU--$C(d_T> zSClEyN@o*|Xc(RM5<;c2kmg+q4I;qQ7-aY-&!)(Nu%t^=Fl74l^xh(SI+k`}Jmn zsmPS3yu~h@#PJz7qmFd!Uj;!H%*K}Dsl*v9ubqOcnda1&qK%&hkP7oQNAPZiC2g7~ zz}8kMs&=)H&Ofu?=`ms1dB?E1(PN(wIXHCUFXX4MZ)d@HX@z9TBwTOGsBiAaG^G1@ zRkwO4#f{oFP|Of!zv4qP_C+?pF^V)-^!j2Mb}qcA3!YI)t9pnGol#Ev9{;s#6W=7J zR~{ZL1=*;D)Q+Bg%oN3^e08yR$W?n(Ukv&-N8v*aK}o+#9$Bxz2YBCkcnyQ*xQnjx zKo|X`EE72meIgR=n@vt^DSBk8_kw@&LY%!ExZ!yA?b<@tkDouGNG`KK$KUVgEiGU& zQ7x9^ERMX*Rgm{-zrrvF-_gauAtLhI>j(BKMnOq6nGXKnD5m5K;xeO1}%!}raHxyc>M(NI~S+V<^wcl_{UgZhV#lw4nCnF`P z?mo1Xi0^)@cj$%8J#tx=u=5Yk(me<>e5HKu%nfCli($0Byz{6_PwZ9iIYnomJ$v^h zje@f5gD?i^u(5kxIUk@sCnrymrK1gY#27JPOLw9}dv}uUB8tXNbH^Mg1IK#F!;}O?cW|;pc4j_OSB{qa~kwbP*2> zk2FoaV7a_jVb=O?wmp8XBXPK{?yhxTu5=Lba(6~&2)RJS)R}oD0&d@xYAJ->=pBk9 zkJ9bEV1B%x7(7zlrH79mAwPcPFs$C)U7vEE>u^~f(iqauQl+P$gunyi-Xf8 zxjkW6WF8V?d%RpWo=QYQLP8!|a9MaC45C|Rw>&o&BjjvuZ@&trZ9iJ6!EX6O5tXmw zfccS^_K5$0D9*KEw zFCFe}$dxn%k;JegNO>6CRz`F_Kg&{0YYD#xvB~Oo|7K!pYW7W@Au=Pw;%I*hoLo*> z|9rH3&kAD|O1%Vr;v;i%v@i$eul)?-tO?7gbY0HwSD4Da2D1$rd&~#n6BRvd3?VnU-BSPN&6{`u zdtfWtgZ@OhJaso%bv~yCzsI04Xyi-^RLZ`fn>99bv_P-S4x9TuHg#utIHQNnm4h%t zK9+W#N(_eK`vP;=j7u$px6r(a6%rI|i{)wBE{n%%9&Pm-HR#WzRKp`H+-wIvNm9w$ z&LyR&TNkR3&y7~j!18X-5tC~sMTf{fq{^nXJIvkMC0|;DMVcM;@%!w|yU+Fc1=za=j$jJnJX|CnKWEMy{7kgHvNKKY46}*(WSeb#m|r& zuE0H>X&#E86k(V@*jbI2@WM_9zBij~i@k~4n&A@b7_73q7L~Aw$l|h#RkOtLZhiH$ zkFK?R)_vcL>}&SYQ=3A_GkOlu5bEYf%~6|W<383^$VEQGq6}88e5lW38=KrcwaYc5 zrS{|bNq#W_*kgs+F%1;V#iQJ`iD1Kuoj-2CL+Mk=+t%#Pmbp;TT-CI zQ&SUgPY+ol*eGv)%ZGmAJ2vV?kdJ0@l)UySl8KiQ14u|{Q~<3WOxH%ATcs-bNk;QC zClDXluU^0I;_U3~=0;K%5fI_(=oo&sRHV`{lGk%#dYTR%!=_`huI_6u5LA{O6Qjkw zlYd^~j7@@DyWK*GW@F#Jj!Ss_dQl%a-@wEcL+d0e_Ggt+!M*0#TYBQpaIulXVCRx8 zhpoxo%VX4!h1d+LT-h(%);LV~R`ctX+VH>ABFM~sz%;krHhj2PP+JO|(0;6X;f$&n zxy6X09tWSA)h-BKHeMC$#5tOIES<-55+@$JYbg|^Ax(ogBN(nxnW7>U<;~<#xo5tF zfzjWrI$$=sDquSl1!4(9XV^Co+~|wIc3tLg+@=W15I@>f6A`I0X?Pn8_x?PyoL-|5 z4X4q@JOu$HNC_`Z7u7)~!Rr7G>eJOO$L2!o-dW@cSbeG=7Ua8JbFe84qQ(SF6TpH4 z6qylK!m&_0&)!%0rr?RLSp4o(s00HB(^A{fe&S~@GRcY^q7ZT#aKF|X!nrZuur19Wg5?K8#l|4n{n?lf+H}N z$3j-)Ghwo*M;CDKvCnVmRt;KA?50XMWL%--{gq6e3wbVu?tQSs>#pv446W{75k-CU 
zd(_-`g2E3PU7#~@Dng1U`-=sA{lP4Y%#8By*P5u|hld-TX>u`&KhR~i!wlJX-?5FI zfY1Zx5V-SQQmcsm^XU8c@4<5An(sjnOpJUG1cl`km;4PjkKXmECTdZ48n|_DHoZ_p z!L3Z4qFL-rYk@)4HUPd7>;4us)i3W{hn#k!NaEzvU%bF0eI!)bs^z~k=E>Q7?x?3;oTXWe2iGom~>E6dBH*mRt>mj-89 z833~B)%tiVDJcmEl*vZ3up3nNyc>TwRAiy#?p_`f6N5w|Dei$FUSrlu-2TpH;8TjE z-$m708$sEAgaC% z80=CcCrqYQRfyxg(G)?JyGr*yGyFzK*bP@#*X^C1WO46GkL%!QwzjuRGrK>qOc}hH7_Fh=D-h>xAF<*{qd?5S$q&A-$UY&A#sNXAmu`NCN?Y>W+?%rPZTS z*lBN;RZT95po3mI+f~&eUrX-*l)a-M{2|(f4QG6}+`-%A;oL9m3G}MJ=n>g9xkaeQ6ct zLyixclY^Ze|M;9$mQEBGeumO}a9!^Fde;T8y!jS1d>H<)WW(NfYyh3x8oIhQG*;ft zQrjQhW*@{?OAZfw*@$+8QY6xV*}a&XzzFP6gtTiai1+iq;zfOC-e0YSbBAuU&gEaX zqv_0F%L+=L!Uzf@6#o(w#(R)0vOhKMV>Gd``E0M_-RKFdy8q#iX!pZ|4h23vx!B%1 zg?xh=ClCk%2&uVoUqc{oo(IzkV?i9N6*OZ5OS52dVv<^X-U<^L7Tp3{$xhhm<{%PI zlZ8J%j71#J`9WG(V)P{Pc>jXBpo60HBdR*yvFmNGj#E=p!;kh|cqt)GU>&}=%~kC} zbNUo1h z4*xk~HtkPddsue8(gabD2(|e?W^aP5q+=#@6BLF~I&85qpRsyzUeH~RccMqhuVIg&9J=KxLU%V#0@cu%+>wQk2& zHl@l_8uPT6;W#%n#g9#GMsH4c_P2eo2v<7nRV8vEZbui*_XQc+9ThuyM-yXhinE;B zUY)%8P}Fzfk7f6xZz~wZl`M4g%pmT@TbGOtejH9;V0YtP{#>KFk>ScQVR&yAY01_3 z=Iz_bIF&ypV*nxPIPqE~F%cFPD?rTOevmU({P>4+u?X+i2M`t*)pE?D3cH{0z1KVL zz(cQBIM4YUgD)maz?X+g09#QxZ*aw#=ui7cgMGv;u%jj%%f*;pW7>oO0SjaC1h9+S zCUcBRN!9M_A8$SS@rPmeQW{Kr2_waFm3e+)oSX#X2711xb6pwn0LHmQ@y98(p$qKB z+}ytMF7+h{3Ju3ne?4fmNFX|>RQL*l z-q~NV>8G^tz7mRd=A4}eVrYG%M#n0fc0SKfybjG1hg1qm7WD4@d5yf=C-xQcWD!%P z)ZkN+MAP+@ZP@t=1pmdg2@Ioz0ZXzSaMpqohX+(ihM#8V2cZFB<(G@xL`?yI0IB1% z@@^C|C?oqsuK5L*_ZAI>feVx?CAgaBFH0d4lH4Zr8JZ)JegE5f-#TWxm-RPc9!wlQ zApVOz%8)2=jbT}oh_40NvLm!vs+;ExlKR9Q+uM#NWkEm`8_|4}V&_(_Ne#hsCNX$n z$CO92Nyx&%hcn3K@9#M^Bx_xlhxRt+wE9=??#YQ-+XWi1BRFE8!!9~Ph5kA|+EF59 z$+vm?-@|lJ9@!nH-4=J5akh0kD3nF8U@STk2G{FLXJ*E|VP6O`Po~n@BeI?g`yr4S z{pU`;J1!p-tV%d6eo65RKnM$49agLZqgG6wmW4suGr7^(%o~*qQS^>4}=}TxhwRF zG&2sBHN*wwo{Yn_Dl&aJC_Eces9hUo-4$egv}$8S=`U!n`<^ZYa#Xc>@Y!fJGb2tM ztVRU_nE;pzOb zYY>qJ0`rPNlZ^P)ZpY0{*v}K_7ySC!Yo6KjXcTVC4!=13y$17f z6>4u`HD2$YgbR`Hym)w4Ai#sV{E*aCTyyh$0)r0$Etxzy-Vs)DmekVM_gMJxwHHlr z7n{92WuBvy0yarlWUc91vgk|@^P|~o^E|am;~L%pE0j3dNovR|M=^eDZ$QZ+B;mNB z{6b-Ozb&x6pJ{HJM#MfjBUg?l1%}zq8wds3abO6OQ*I4xph;R3x(TJc{r==HErR>K z?FUhMDalEDCG5WW)w1lRd5gOlZl-OjTWkAy&vA@p3IhvgR%5W?ww#JI3{YIw;ud*> zI}qrmNYi9YMSql1A)pS020z$&y{E{<(q0#A^oZ10Q^zH7QF^yT)pT^kN*tFbH=NOy zOk&ag0skJhy|c2+D@Z5d{>gf2z^kefOVyu` z@C)tu!M3zUfnmN}OxbmU=GB^m$KB~Ap%gj{GX;hrL5QTk$aIN;^haU9kgu8+VLQYt z=q0#4QBQ3@I`rAR9lp?)myk{&CcUlEe2t)^%wvOMqfH^+T+ZA|5O(GDI>iKFTb5tZ zWi^8k@mhY(`43~3=!ZXSD~y9a&XW)%Ym@Hwbq|e^<|e`B)PYxTCj01uT!^{BXL~iY zPs9l%ePuvRxESz^gv5gDA}TLed%e&-y2yF<>;-HvS?|pHl$e!qH zn){h99QuN#(D|p$ozbmDX05RP0{c=bVHEd8QSEJuEwFoNKXGvKiYm(9ZDFb7NR22_v9D zK|*IIStE9*BhR2hoRk=2BnO0_N^k%Lv zaMtEVZ^qke-VRYVDtTJ>{^IqAws0Xtl01adQ1to1wnwugbRF9qc`#DFcjSj(<+{Aj zLQ5;ZK`#^k z!mdq(Mi7^@#P{ntu_3Ckxp(BX2XYdH`S{n83Tl%g?ge;8G88Bz4jJ)IHbbRrABA_ zP|d*zSOX_j0nDn%yZ{1G`oRi6*e0Azm#s8uAo`57>Z`Cf>Zz?7j}9?3wCj76q1E_Q z-X<9-8!g*1{6l{*8ZI(0AllA+KfjwcyseA{hZckA+Yz7AN4(gp&LWa-P#7S}{qW1w zsNa5OGorF|q%@puQ+)Z-=Nx`%g{ND}>i(1nfmkF8(?z z?3x1~T(c{})N_M3<4d{Xu%~gW-fh4fpo^Hf6FH*Ip~Iy6GWq8Xg(3DoXnIT}vnz-A zVt2B%i=XSGKfcV?eor1RsR2%iN~AQNt}%FisKPn&TMY$734vs^%?sMw|0@s}7=t`! zu-uWItAbf=->Qbr@@AaxL3SMJO`a4GmCTgXltOZpK#WqsV3X}c&on#I};?s#E5QBZi(xi*%>cmo(Cn0k&)48 zdTr)K%xPfI$W~W98u$Ht*akCryOqvv-t;k*-DRljDlmQSd}Ms&vqDW?G3jjI;#CX? 
zY_@}+lcddo6Gg`>G4^>reAN0FqEznXQICXFDJ5v5t349?@)lW@YzOXlcX!*ajD&+Q zbUYQhjQwPYVu5J@yr8)rS!<>E(GcwUAbNFt{5FwlwL#TdeSoPM5iX3oE@$-Z7)HfL zCr(c`Y;lt~Iu;pvq`lZzPZW;s%ZumOGbu~<=Filx(26<)Vb|+m*OJZp+KSTs9-r?Yv+(qK`|w(j?Z-gmXebo=xs*&|O?T%-N3Hf1XXRJa6&n&j>UvN51PFXMW2}>W-TLOJ=`^BSB^g(z{yde1z@;ZptKMdwY8e3&t7xyqaw^5_0Vq{W{H} z>Ck9^tF8yzwi^lKJ_qbzsSEXBV2{affuNMRy|(ryI9Q!qfg!TuGb7W{zZoW8sW7a< zX{N8h$UbQUD1o;@246W1>X4%&BT>+qmS~H%_b-ta&ma<1LOKMmNLf-_1uN5$7GDxy zx-*Yu1DUJ(8-nWQO|?%?>s%K5j<&DKoWFRnBT+=lqf{|b*ksLZb?gXiSGrO%$HRhY z`;l^VcegW;P6X^m!YpQtQLrtC&1*9lQho=hHH7g^@t$S_psMt)P-CDM+`W63sxwo& za1toay&0fVdXtRIiaXY6`umL=HypQ@bhffU)Tjabty5&~0?Hs#`uY@zBuNVIAgXC`M zc#yh$3PYS_$a8cp=yv4E{xJ&axpVM&{F6}qH+`NbPb|_}yBz0_*Ur`R6eHssQt8Ko z4t26XiM8oPsOv+=itVq)>)M_2zGMaX{6 z7)KjwafwENaRZwEF(oB^k}$}Bju#JtO2x}|KiN*rRR{%zb;!oxX0|@pIE9h*s&T1( z|D%ajKwcLc^5#Z(@txt10)Ant|4gsKVfFH_fj77UkgRuK3xkYJrFR1Vbx;u)^jlWj z#z=zn-y1jG8tDk7|MqfrS(J^N7bHK0w6vCY5BUEak<@#Q1dQka{mz6q{z$99cM-y< zdjHOF50#;K+nIl~0^p+fIAQrz8KFo7ztNZq3-eJKmSsh=g`iC=-@*N3fJNO!+S8~1QnlkfUcn4 zAa9O%-uUk+;Izu$Gy31AhX+x*qR;ad=?sQc#ElE&-ckw!H@R5uG}Q?bZ3`^xc%dnz z&i8^^iEXFxdDUigS{?2grvfVwXO29y4AW$z3u=$|8x$q6kc+2F$B8Ll(2=S#Lr|!e zRzDDw)9aYOoW>N~!hwq|U^*?Lubu`;T*P?r+7*hjeykxmQsrj9GE$MDm}vC@9r6CY z@k3C15fu}|(OeAr9{mY;5}6nbTI01RDk?fuZ0WqSVnBZaV&$U%f~}n$H#av>t@^C$ z_x5EluT8$h{gKGCe-8~@Bd3k%=kJs4Jv=fK$ z!U30<2dYJ9f{-)%qcj|_bnpov7yo$1?f>c(g@|ig`BC4wk3B-BjlrO_lokOVszX?1 zegW;K3!a*cK94Mryk8)#DN?RecyBHA0!QY>o@_PH^XI=R?+IO3#{ z-2O?Z)EfXI@cmi}HS+84?s>0p9Wu`gesuo-FBRLd^j0%QQtiD&>&GxEQJ&3-0@LIn5{(8t= zdqE3l4$ZQGAIYy-2>n>_>6_3MJG6)n6ba6e9X6!|_cM}x;y|@?w90LfG`oV0O2}zC z`Q}CdA^ydS$IdnUg1E*P+5MJi)-z|$$OwvLvSd?dXJ`4Di+i$@y;?>F zp!i3pm?RntgjLW-KtG7)G^k8XO(XJf-@}@1r33{9IUa0Xlqw?H__dq8It$B^8P3-Lprsd9*czvYs26~;bu`v}Dm8?Gj0RhfP zwTM_Xb3vhN4WQ;AQ1&gm(=8E?d|4fcY^wK_3yuQG{z5fo5^lX7_39dCw~1k8(wg4- z&K=Ss6&$L(S!}OA?%R9C+_4ked6jaBZ$@wR?!WVbfl3gtaz9^R2E~L-^S83u7IBLY%QPK)JZft7O(=z<@RC2Mq5flS23s?4t!jBI@74facc%=(#9k3nN zK+wojB^5{njH(mVr`ZK;WYUp~B}UEcz#yL5Y$i_FX@}RSy$x0Vj)0Xf$>WB+X@(XCvk?u;qS=Fi_nb`;=pLi{0)> z+S+5lMwdrJ$-udyRPTpkls87{kz60=o)<-;^DL_!qH8e7m_u)b6sh2SBcqnr<)_^ ze*Q6QJwaH!hpp4a+2Cs8tcCdluGP6KEi*r-e@@KZ;1Z{eMTjr2mB(%zD56CDJvk?T z6+IA1cbn)*C7N|=eU36SGlSF3Xpz#3w`}<`G_v>e1!3y>Rz|MYN6FNX$=`)I9AmpI zvTp=G0+A;dR4TH{jM`|}I@FJsL&vw-Cv(PpxV9!Y&MXrTgJSi(md04Zp7d%lT=#cr z4i5qkM{sn`w#L{4+b4A>2ext0xaBS0cVM=&GyS}hNomW!pa+4?jcZiO`CXf-c8By= zncwOvdnabGuEA?S%i=k6A#Fj*LqkI;2Zvw)6mbHtU}fWtIGQB5)SkN0lJZ>^Ev6mA zZ@74ZBjbpC$5-asLZL}RTvLlLgAyOAOByAVs77h^3Q?CPyB^;AMYB)Ao`N!|44LCw zBk25nvh8+lHu~w~CH%wJH-5Pipg)4b)psBylA*4df7u!tiaBfamV0DVE?N_Av*?SCvb zoI)u0B(D5hyoA9$pmyLVD6RopJPnl4kbAuK;7x5tT%mXc_2d*mPU}=SZS*uQfi^&} z0Njhy{XqQl5^UX?>My#Y&lBeb>5O9TZmr^Z>WO;}D2)k$l1<_hF^JN^%4v){B_=T1 zhv&kK7Ff#!FFb9ae&bdreKj{1!d-JwYH-ok+ZLlT$vbjbhLF z+K7-c^N-0QV%bT*W$J0Bu=+~u&TLIg?L!|vMxzQZ+u0Ho+kD?Gi3Lt|al9`Tf8uS+sujFg}{GV9HuM_!# zt}wp^QK|wBEy82A8f9QDiWR~QU(xkVuSl-2^FmL4I9fDCdsgXharDv`$yTu_FbITy zdsBZep-4z+*TpxLT**p5&HMIZO9OW1>VwWz7>izb@pPu9!AGe;g?RqaTKEYlMvPXv zSdZ7BA|ycY01a-S4`aVCPY>iLs<#305fv9Vr1PBq9v;oARq6_DKMRu1EYJ!AGNrUq zT&;b+8y_+kr*c}=O2j$3<<3}8HX^{~YEEy}JLz;c$1DZE z4PBzGfd0;R_wtSIG2`}aY^hc(+2tMX3Q5f{l%3# z7=!Pgph{$Ap8r9=p5R)MkA`yehd!gyZO$o9%e`Sc$tqust7PiOv!4W(DVG-s3kqgg zk+<&L5&VUH$9r?hpmS4o14Q3U4=OQBgkibE#P04c3C>ggaj>LYkQjIC5xtrC7%iR| zE@mq=VE_w+<2|1 zAltxU4F~Py*aZ}2BBi&{x@~PHDq=a4|MLfwcv4MXc18z=dVKVW{PO8rO-(kI(&^mTVT^kVw)Oey%D1^wlf6>AR+& zy9eYrrz!v}?#+O-BiHqdQdNCFi%)5k@UQZ`_`@Q$Cnl4^p4~tF`9p593nx zMfbmUW3|R^wHVD@kErR!x>=SznJI9Jy*K|XJ?52bI5gP6KW$a%TPi4=fwUyw`Wh=D ziGLOK-2>WztPP$+BmKzODMdxL1CYV&H~<091^<93`(X=nRp7Xa%3FnE2bV_ulS^$D 
zJ5GA-F#FVB0_(!1eW$R|{pASVu9$#}tsulRU&QT;<+dB(2^5Lrr%VBK34@_u{_3px zI})F2Ccih}KNO?&!+XH7NmJY+o@haK$D~Udm{wARYhdXZJ2!qY5Z0~t<6m3b8OI@wMTW@`? zs1OkrxnHB zA}*Q^v0A+e59wRC51y4SAW!z};xiri1$Nj54Opu+-Bvp4Wsf87)$-4Gi3Hdu<>u#4 zwp-b*hTci$DNBwlD(cx-Ur^|C&C#LR#`w>Q;ZL_j3dHB=X-joy9%bak>u$A|>2B^n zsH(+>dmuGDcPJ{PCh6}IUgwW#zE(2ec@E;fw>vaANZM~cfq{djWzc@OG1EFXF@Xk6 z>s4;6*?r`sr1OA^-6p%V4m6#W$w?FaUVs-;Ge<{9qoboBd&qqTq_OxHEt^%m zc|SOWhi}Aih&#B}B~7`ETdL*AT%|XHRy^O6P0kzVg6|Xx=(#Cdvl``e1ea4we~x!q z&B(5;7_}mHU-g@4VAYH*bd2fI%#%d!+Y0LnXasxgzfO!ph1p-FXVFMJIuZ{wxy>Rl zKh)H?eW;f^yXaiHqV$RXDg!`+p9~wR+Bl#CG;xUvRjHshWWgUI^{dBVnPcdw%0o|k}e1o zEC<=*M?rgD^291ISXcG3g=IYW0k1idlz|r>%_USE<(94d{CVfdx!RnwdgG!OwtYud zSC`(ls8DR7J~rNUST)`siI0y@E#adtV^rZpPeb#dWX#BCPx?UF>UBXu!EUjmhNPtA z!n{K?NYOxpt&~h_(aV=Ft!834jWoM*$}pdL@WqCo_tpwNUdREB&^ zZA%wHFNCqtM^7|WmS0Oinsu3yBFc-=!ou3L)YS;}s^L+gg|kOVQJGSY2Xv$H$wR84 zfh71z>{HiX8{d*FRaPRKF&j@$&tBNc@qxVcpyfdDnyFa?+-4QF7XW1)fJkJ6S8By4>w$>Pnr*@Lpj8p ze2!O4t!nHa__S|eWSlog_Rd*+J9<9SlWkwKrI;)kC>EMuA!IgMVOV4<5ejh3cuT50 z{g_O`Wo1N&nmTzLelq$Y+xNm%h6T~e*-~ECLZH&{|Eot?E+S@Egq*zB=3~(K@Yot- zizmwhJ%_|7d}4MZ8E*DIBx5^zn1D*-qiU~GpG}|;l*36we+G8Fcy%%2UO#Zdwqu|1 z?t@A;9pqSlno35}!WHb~T@5m~6|-s~EhWyucVjek+VLzUzf!zDATv_~@n`L49dJ{I z7PLQTw*_KN_lN94(dm!xYqhLydA|h)@zxBUDe0V9v zQN7Mpr#V8X1GHr#K4UbD%j!6yevZs#X<}w7zP(qWS|}Dl<+Xckz!C-6ztK*JoO-1fuA zOx@`UR|yC}0*$+z1Ki@~A_n*xt6F-nq(&&U1UTPMfJp@eK%PBIjoABH=LaMXPl-oF z3&O-=kp5h$jT(-)|8k<#jB(O-!Ls&=dJ4R*A|y`a#I?*Nx)=V-v3{otILyLPYk)w1 zQv}CfFOU4?p6TLhS3lEmX>7wOuVv)^da5aYX`RMlIgl8K$iYYcQ{V(|{1Q0-loLTq z7B0yDPjX^ns1U^|2bFs4VDiUkj+@^?mz6AH6m_d@c9?M>ibsL_EUr5)kYwpwVWD0O zFFA4I)E1xw34g=y=}MY1j1q31Kwam*S8z$kOB_)ut3~(R1Z%|mN$>pcYLZU)kS7U( zon&NF<1|ARZ|PJuKpP$2i{of6TEuEB;>07xN%tsv7PPyZGL_$XT@pWq0qU;l$jdZ8i;ON@9{Nv= zKnOAnf$lY&eEl1OPk)@j>q&N{{||{pTMALH-PD z!R|y#2)#Y?>=G`Ap%C*n;$caozvTNw$ zMOkXH%dMphb?5u26L9?p77Be;3SA(3O^y{sh24n&R zc4LRPK@LnbgL!OhWZXJA1IC8oOr&DxBUvcb^?~xjy%2I#PlDsr!-mX+ge5{ss{&@d zAQFLG^=75^EakgyP-r=eF(lOW%^Rkrc}pi}X97Y_=pI&LE|^*rb21SSeRyOrI-1C+ zn48kKJ!nyK?ySB710-wi%ga(?*$k}*abix-H1>!mCXB5{D|eTNy>sIiCMTbdaV{1O zHQJVPnBR?Cs0TX|%`(Hz9_88*r_nT^%&Cu=otZHi5kYbFjD)d2GWJy4vWg@VKPq;u z@v@8M5vj(?B0%T-ToC8Ng$o``>=It(DBUGwX!cx7v^3dxg)`}%>oc(kjqF`^{ecIg zld(LB-rhx)9k(P0p09!G=}@65ejG?&&LNU~C{P+XCX~WaaUwekGZQ~jqHjx%R}@8$ zT~9sss_mq)CSiNirnj86)vbS2hb9-^2D@}g5ls@`U1)1&tdOukZy*^Zu zBnFF8v$h`i^mD#D@!JK^UHlPnK4NR)_q@z(Y-~S!vJ_Q^MOJSu_bzHIruSv3eUOR# z$*wmR-gqauuh+ULDN0ZDRzq;$!|ze{ z@N}O!lOLydvQ{rkFFVa0OR+KMw%VOh4g1{pbl%@1-WIx9?O^~UV3%#1x$|1E1d;v{ z%WhhtMpU(AVR9dAKCUC4}q@6%~V-GnVo}Gl>f5?>OIKp;y3yW~Eu? 
zHIf81bc^aFh*u%7nyrNkR+>5e=HRBn&dLrMKO1IdhUnIGA^ z?*q+CmmJh;rfx`a_gA)ovc1mwXHca9mrR^`&Vg#_DUBHMR0*kcIk&T7dy}O9Yrnv7 z**L$#F=czhAqZwt)rv_=cW$U?w^NJl&%>b2#oD1sypYh~Tp**1qw73(_Ls-Ux0lO; zv}3X~+3nTU>#vejj@FK!0Dl6Go2{1d0;}GUpwm|H1~`#)p=|WwYr+Dj16S&-Jf9<) z67BAE>2HuUdfFQ9;ESV$DqdSd$cnpuDWCFn3bJO)Hqp85u8;x)k=oBxM6Zp^$q$GX2~wzGC$A&KIhJOsY^{Drz<`KOqo zBERC3#R77j7@gW09e4M;g9**k?>Uq8<~q#s4VSYFN#|Eq5}wAhzoiuXQTot-q(wnR z{2;e94mA9o^Qf4PVVOZ0KiZfEw>Ld`1)kl%)yKEU*Xv~vW^M~^bUGQ4kZvO=<#Oh} zvp>dWX-Nm*U+@rh|3TeA7qN=r;^LZZH2|fxW)(sl_TdzdH&WyHN9_WBKr8Efa|6Ks zFCjr#5@`8g(qDo}3rN`G)KEvXxa&QF`)cJtTm_i#>#l;xKYX>EZ;(gIaVl$^fy^Ca z&P(EMGX3hOVJ`EEr5R|)T?04;T6j42mHI?GbNOXg##65I1>GHpgh=!C90(=-f>McE z$W4l+Xx{6x8XlF7$o>~+?n+8had050sM0T9zml)l&$b0suIu24EOYBiR|xvnNNuFsvL0xvU zv3WI}Dv7fNc$F)CD5dAg7|Mq>4x5M6Eve43eF-pA&@JD3b!>)JYui%>r_5`7Gvf>WCj%MD$M0)@c{3i9z30@h{o|2-d8?*Wo|0*Aiea zyU!if-o%C=a?R|`f)|>ZVW8zrzxM+YA|fSqYkQNoqCzMjfTYr^A|T@Ar{oEtMDFzJ z>cYy3sH4gClG0A`9Lm3bT5$`>is2I!ml&pf(D_g7Ex(lJqGq5^(SBftzB-fPnwpp> z5_2O^AYZd+{bB^maC9E)e3jdQ%c@~V6Jt)9o2E>r#3C{_x%pb!K&4B&Y&2HWN6@k_ zS2)g$j?2U8eX^0ONR|9WwGaK)ro-a_PgZeEj2yriBd_&F``WG2e3m-Y_v-4NwFZ){XKiVUSsqOY&F37aslrbTp!bv% zOH!;23e*zEAMD4kxVpwBBvSI*S{YZ{JZ%i_U+gDT?Tc(n(1F;O)VIf@4i7)G&VPA8 zxOG0m!xr=h>~|!JFH~4KS9Sf3&rzHHS4YpD)#oeYpwj;+#Sz)e1^3wz_io9_=uI2) z!yjgzPlnrReRxsjSP~b~uOFKKbHX^nDmvp?_d;nZk&7N|kN94!K*1tVwL6lQZJu^v z!CrU7UFS(Yi!vk^**`isni0|SaBbnf)}p-2Rzp_K7P)w z0lSczIIC#$3!S5if1z{sR%S@}tK#-@-S);sm+7vw6~BubQIem0^7cwMvFw%Ao;#k` z$%!|yPM5D>wNaIUW1rp|HRK3ffT0xEL@V2zeEH&Qr3&89zUCEqPBmPLIygR1>O=eA2B;w%_V?#+sc$rV;qAn!FIR74<<}lQ4l4ISX>BjX_QHC4xA6&& zJs;~7fK`w~r~C5876wPYB|)_MqV!%d0fi!@myeOyjIA{Pbm9(Tu-13dA=ahOLk+q7 zuIoURoyGPi8?Mjjy{oF)0O^Jv02-q0kAQVITaS(;q)}xlp!i>aJd8#Gm*M@Nz*@Qp zfb}<@KfyY}?mxkrtK<<8<^%5UK$qgi8#$ECX0EFEsmiQ z_Ph-?txi|np4GGTLGt@6#kgCV)$Sd1f|b>>hOB%)OMYWfh9zXYtU#gAMCj2&Z02G{ z9Gj!%sXH52HXTE zH6Pa7a$3OGo{W&5~He+M*%onGrbh2*(5yT-TFKF>*SO^3^>C1giu z%y`F_T{|5PS6#UuDk;?^sh+=72A$W-;!T;2&S)3I#|b?+ZNXtP%55_<^L3mA)4jLK zsZR(tj7p-Q4xhBTdhTlYPc?=Zp1b(!`~{ElazU8;doo_8ayx^fo}1iT`Tk>LI`RDM zMAYN-TXbw1P0MQ&+x94p@7>)|LPd{2=8Hy8$mBHj_6}}SUslEc(b}43i27pMG^SM7 z(w&aXsKd<>k8|Ku5(6updJ~+>%<|V?Khv`=P1?4`oOnuL2L>)R%lLg&sd7DbU6y^j zJX+Pi3L?U9aA$w^pWqJ4A+P0XOyv`_r`lpWXzPZ{YRPY?>>rkCKS&Q)E^`!l}%% zIc*sk$=0yvnU$R~PK502`*@#HeXi^Jet+NJ?{VMvyNHJdV7!8>v$c<^Z7iU z$E>;7r>3T;`X?VxB8S&5yTHl1j&{1&)37rFnp+2010OprYVZuK6+CN;$>LJ0(<$Qe zIB%{_+sGaG;-XLkRCZANc_3@d*!p}!|iV2uI8uCrY#L{aeQD4xoX7g zejASjA^e@DCky*D6(29W?RtCY1X?B6AT3>wNGc zR*acCC(Z5RNUpZtD22+cNRRPdbf4cl&;K(R5ptY9 zH0MlK|MBCzOn#k8&K-LlWuGFBQWxLn4H2F8<89g|&G+~PEw~QBdwX$Pq9Ui2BclRETc(0xOkKL4d+-cx-3iSil0b9j9Cx7$j}cDx6QQnkb+Py#Q= z=91@~SWj|W{eJXhHdLq1$uDDflp}iv2KeMCn2x-%D!UW-#r8Agrao6WG_M6}_uq|4sxvr@qs!(0;Ogc`j-*x6a++rq`OY8l$(=^9+uGf)1oh_d|-DBw1fGvzH3Gt+~!Qzrib&u=F6W^&2dsCyi1yFb5j$rY0tHd+p)5`AtuRqpC4JVZ;>Kvn7I%Yj3`y6?ZR- zun(9|`f&5;f)gv3Mgf}vAA%I078L;@2%^S{r?#KiGu79_$eHO0{dZR77U63m;l76| ztE78Nd!}Iz9693IzJ|lbaE^su^Ibp)teTo|*loug3LUZrHr6c$+=`#rzcg!~m6qZw zv5xD3R8cW%ybV%+VSADkW3$ettXCXvllg1o;YOuqd?&Ly6^5J6k+Tk|pCY4HRni;K z#Y$Ivc^Be02Mr20-SRDw(tcl}=+|&=RJbv_w2T$Wypx6ghu22a_4Q1OWI^gO2Sx7X zDNj$tcYcu+DpHNlM1@u1<ZuL|Hc|846vC1HKzAOzhQK zx@slN&Zv}?oWqsc6S5Q084zH@Gc>LPW&D*-hY#$p#b(ARBw@4uuK7tH{iEjh*$G;V z|9Sp^y+X(zidHWF$sf*T-uo(vvUk1QnQ2^F8q?1td30ogf_IKdo&;>1{_Fff>R`MN zx23ZCNCVk$-E!Iz&}!E4GjS zk*1%QgaBVXonVW4SpbW`v*qu(-5q_M5oEqHZ4;?ZOf+5dkN5K-$Nn$qK)&Gl1u3SC z1&^$yCy&-YY&K>>e^me9WgKm<)oegsDP{0FHc`H-3g*LCsbopGJ zgcevxMu0z?A3b;Qt%mTvxtO!EGyD0nBCpz@3Q7|=j)8~{fi9e$3OT8&UMV$jV^Kcm zy_}^jD=he#?T+`Ls;J|@Tby~tgJ-d)4*|A$=#YxX9^%dn^H8Tntf5I*ZI6gKth=w@ 
ziW8!*CXTm`vp`Q0F}U%#EnyZr%?K;wrD$DY~Y&HjLk{QQ?~KgD7@Go#Ak zg?ttG8|=3-i-~gqP^QpM&v}W{pfG2k9*^hNouxJU`jOhC2~DxoUAC^bAmFELZ`=O9 zj_Z==;~|;nb#lQqd>|w6r7UK$^S-}eKC~*J+z2@ekxOGsiZWH2;w3m-<=<%4 zi%%T!J4wk}tqOqF9GwKOcrT}ZZ?QWQjnUB-}s5gk=?Y9j%E}P82$0%av0ar z^;uvDxZVsBKniw}lz*iMI?6q;|AJ8fKR{iu7tDuvr^803JFkj}IB?P{+}6#w{p}#T zHIGjuv%sj^rNGuhs5Ov7qPABBv4dD&RQ zO<$i75Kc8llyjS@@-<4dPCOZ*(c)FMNL%r}sO-ZlJ-0W&vTr;C6FuUm8Sp^A2A_Xd zOSNhc5U`s!-}4nmejzg|6>2=v5lPt?_0dV;;JxJCs1Jr!iRDOk<3cq#^KFxAl2vVu zg7a={2bVz=qzYQ--l`;RbeCZ@PfBCx68Eo=G5B9F4bgpd7xzG9CLT<)M4jiElRG-HZ@gZ%?rM^MLqe>>sdWH;*H^t$ zk`1M%+6O|j^!p6f58f|zA$@(!{b=FweDW}})}+VLYka2HfAkh+Rw1Cz+~JoZ_j9l# zZ6|p@J05KWEuTi<{pU%tC5zf_=26f*og93BLCxSBl}uY|*MQr^I=~wVjr#hWUtfvb zGo9|K67gHU;&FuEF=XYsodTa+w;;9C(VnR_ch5RI?0bqV)tv_}9G&J5 zCgt;uHF?c*hfBe(fsiH%B56_(HlK#^-=L8-s|hL|{VW6Vt6Hbd+uf!WH7UBE8wQXS z1MLZX9HLf{_fwY{C~QTE8klBUh7*%eE(+Twcm$SN$LU;pkUr8O#Ri+i7|Ocl>HcKJ z`wO??RGohqiZHCtHBXWpw0pO~1$xpap~+~?b0)y2N63Nnojl_jIw7~SMij#!pVe^To+bF%sBr%-4}U&Hu$ z^_%-UoG2CMkp4`8>Wow=?W+yNwU`GqJYNn@J;fQX47Z$2xEe9eW&|!jF$GE;%#?R?6(2h_DJj_cm{Ss?RN+jFh4+tbF|J!NdxH zYt7sC9hOn!XByZM>@su8e7QB~#cmDKt0Ik}RsH=$JSvsIZd#*GPpq$X-Jch@b?w%P0rzt7kDO+FOVr@xeSypH{QEcG`@`+b z^gb=(vWMkO#JU0BIL{A2LJ?MD40clbZYSlmtX{}8XG6W?DCMs~suzX()Se{N%! z;KxsVe|9}3_qQln14oGKD3tR;`H-30!<|#0FRE%!7p)}9)SE~hsaMjPy9k0!y#rT7 z3r&r5%lp}!7aPn($IVtf_HK$|UuhtbNcd)%m*DGBTnOJV#hIcvw#G&a3L?MZmp7UL zVz;lE-fYJ9OyT#8;u!(ekpzE>7f_krQ+H*fVo$f3S3tw&vqE6 zsnwq(Z~p-m!_xGUxP6+6H(BC%8%u0xDngNPuL4DqE|r(bq4uESyAmDCle#zsz++HLfL2)E3WbsKy4b7e3h{pYca29fL#atA+rp>_j{WDIs zG|N}t}AlHh}_j^SADj!VvXa76H^I{~D z`upO3jF-OiEpb&7U42vi$#|)QLxbPD?EMFtoaEbkmk!*!=+S)uSsn!9v&w$nb+!9A zTPV-Da{)ze!Y?spW@_IqdtQu+{Jy>(lPRh;SLwWm>=k}Atnb5zb0jbR=bX(Q8}DQH z-xyVY8c&r(l2|L80=c4M(7rAN|KZ#73;z*04)~8~O9N%{e~bS}9lR;`zm>oF_bgL= zKTokMQ{X*CB_IcE2JOK;FI*P2EWQcxeV#lwZ3W1|(32PR$Fr ziA?aR*)?DIJafQ|$C2w-!4AC>;%~2J#W<|ZtLP}*SgfKvYE?#+XDG2gk3;$^38+!E z`k1{fNSpte-Q@^~jXM5=F#?F@&X#J407mJTrvOYdHX0_Xk;y*U9gOZ!W?=n8Xe(Ps6e3%amMygi{22jXK;E#AAel$REn9yEz3@&Xp&Z))R_gQ< zKhQ)W_C6Ijo~!eE?FG$iV&Hi5a<($~#IAU(5%+PbjB>7f7qR3g81MD95fktnl>43d zf3)pNwk~`P+W0VHd@iB9^|q@*g6fHB@!9XFV~j4mHlGqwTTi5`(wc#98t5zA!I}2! zw{Jq@JCI{4x$ne3+jjAlF9iCdVxB+Sz^tPwo-5xd*{sPGU7ep0GW@cazWTg+s&E?> z&5`-x`mpNC%E^K9&99ZdGz$cLuhsHEp9uO=?}Oy$Wgo4c;Mfz1Q^k{N_QEBscr?^tAN2;xVu0)wU0I1c31{xMOyA zYwIo$Wy_+fBb5Z5Mx%M^IHseW!g$v+ru&^-Hz#h5E7#v#dbqdvsOmuCX7R3H*Q_q* zy|dO9JT?~=n{lnXO)nDi8qo9}ov*8_^YYsB>ypKkgZQY`KHuJ)lAHIdvZ!v2le%kN z&Ed=+5_}vp33u7)zCcf5#4-EL`%T$-`zqhuktY2@3Jc>;NmE~g!@`21Iy*W7W6krC zdxXkPuxMDgXm%Ez|L%v>Sb6pd^XGKpd3?hQ1ty#gj!Uf;rffm{d_I)v0BmC(DZV+#3Mnr`Fh`|V9t>-zpO^(A)A^!pFE6r$VD{!v~|3kcbVDDI? z1%=@{m=Q!z50yX#{QIrHBSQW!;h$asV;{IW5ltzFVG}bAt)28BUX$<(iL%*I)py7+ z7zeI~4S5w$pP95{4{i<2{{*~-}1_{x=C;lwXGkk?8|Qz|DUyg8=KBPcA6(BrG4%aZXTAgD+;{5f z*7I*fvPcB;x#fT86A zh>o3{kq%3q3m0Y}La;}3HhAngnDhGek+qXio?Sn2{-fT7C$8jvxzg@F=t+6XY#91r zlGXaynTMh2nB?gZa&oDMpcYqg?99|V?lzv9oDdFswrKT-fQ&JRn!HEal?(|BWhJG! zo;2mL0WcYYUR<^G_E!wU>GUdYmO<(16Cl=oyP5aBw=hV$%)`S&m6EjE;Qo%?At7n8 zswLN?1SLtWp86d~3$|QkrW&0!YQSSQx=dnjsP8`?gf-e;%-rmHMZuVGehX@!Zy>y5 zz82m@IRn<{k;3+Ad3nJ*^Hu-_ju;ga-tPn=_Oi_l=WqQeIzF@GzA?Oaj%G~tmlmg_ zJlXmB^=tO%0}s??P-2^Fb`K*V?y%a-Lwgmt8zvpp`f-JBcJAN5Uw22@+N|75M(%krVdM2yMe&*3(6pr4aqF4d+S(#5xkRJ|O&gMkqvJ7jjB@$(D^e(pVj>R1(?B2@Vg{LHXy;<3f(+1*}yd zJ>1X@J5+swhF0@6_FJE?HM+$42!_)`8hazaTlsB5!sGYIw}&${M~$S#47xfSzThn= z=2t&8V~@GHxrK4+-JCfjI)B(b2#clnHOZ1Gb6s78@3@=O+{7gLBd*q`tG%y<{ima& zl2*H2U)yUh_& zmYmF#pJzy9zRlX()p%xfAw*7Y|+MV6Ww>bcYLw;r$kgq~|578V~-&Z86*N!erFU+}Z! 
zZT=dcT;n|jq@=yn>rMl(d|6Ob^dG09(q{39w#WVRR-K%LolmX5_ayy(e;!=8Nup8U5rBrvgI7sL zCZv-V#sU>?8_OoXN72j0@a*jK<5{q-wZC0uY{uSR zT3V{ErFk(LM26Z+3%M%>LyWPkwYnsb85?nV=c{^LZGCEkS^f&B>C}Y-;p0J*_r2+U z{~)3BbIAF$ecI|2#mc$9W$z`3@V8IsQqui!XhCHS%Lh5;3x=tU?BBk9tLYmH2Rk8a zz*RtUBRt}Fj|D|1jAj-C17SpQty%PEwl%bZA8vFzjX+Lw3%)+m%M6wGLgNmCp2dup+K%EBTs4~-$tKv5G-UcPDgcA&FSKLw zI!IXWkUAn~;AL7-9-h{wCQImDr^VxCKV68&F5z)k(%Wx-{_|`fa2z<3bF=iuhdu7@?o3R{DJjgdL95}fp@9)BJVIpoY`%U3PXpUk7}k;AZU*)3 z!`QjpR@N_DEe{2OM}h4O zDJ7?$zkex=?0_LA-Um-sb>*sF+)K3vLUWc&B5#X|U_3?CduacVlaYz5c(7v^8LcoH zR=s&=W@Dt#+;Et|$m16=HGvFrrWKJQ4w+?TVi3}Lm{?d+OG+ZyUBHy&ir0Y~Gg?|& z;5N*~#)gdoPyO@Gi+3$IKYsjJS10>ij+#qkh5Hg;(Pxr&Q}AtZI%H-D7b$^rBS<^d5wo_oHX7}W*$Gw&$-AMKW~t`_SU+(v zGavrm)zT8Ho-Rcz%FO(vSiXsJA}x)TqRf6GG&h%rG^i{%G&INB-afy)T)bct+DSih z-6{DF-~L0^5w+{ngIg2nU(dakB?HSDm&OFh$x{(N6)f$tf>XsyOH0uuqdR3wUgY5Bj$@& zEA@t*1uJ-K@jsNIiRu%Cs5yG`XF)q;eTq@Sm0uxGdsiO7N!{$dA}k=_@TFqFSJHiM zb{1OMs%RC`l)$&9w^!G|pqZA_aYP4;d(w}h)H_H$p46+L_Q=0KV2BT-TklC$*m(*3>rP ze`g-6{)>Umb<83nI-s%B)@Ei2Jb^-0x3sjZt+{y6l)ipF6QZF%)=onFc2u7b0*g|R zgqcE67=LLyK5cFK-{I=78s&FDMCu+Swq@Vk%J4`0-s2=pY|+;#@?jN{kcz9zMpVg;R02aUE5Y+6kJFiq47 z!?|;hH_gM3Pk|u12mLYODB@oYK0$I=oS=wEx@sEis9k%>UQE^R`A}7rq>-f$nv-Ji z;g!Ix^56aZWqNvgXh?D_I5>D8uawzdWmTaz2yeIUoa@9Z4J zt^A;Pm5qA9e{auZ#lu~owor97dkhK9bu=Bxt?iKflJi*_UknWY02#Yv9xj3PY&YuB z+@QcCLHc+;bguHG7^fJ5rOsh; zS^Y9=E32yX%{+Hbm&{?1mqTffHhY?umIivo zpkfonqtaTsca1-1-vFzDX91L-;{pPjA>F&WmyMN`Q9k@!(V+}RMn-jY^*V=#su|1A zT%*O^_0d;rj%$`3TOI`8i}A2Jb!op7aT9oZd;7d*c)i2Uk|Sa(lm;#m9nLUB21gdi zIti(u*0=0#pCqo#GZ9gWhG~*=U3{`)eqhm7N%Mf%K!nX6zuoG zMo@m_jc5jRsY>J-t|9Az&e?gjs!AG{aEpX}z-R3|gYC-v9g3v3pR|n<>at`U1N7V& z1+~-iVJk-&)U?yc%J0RG)#p7h{b1{Jf;Thxz*d^_D}91?`3!Zrv-vPkWu;y;;oZHh z)8>qfahb7+N%dEmVEbXkPQJvQkJOae%G#kGtZPe5O!RCRtE4o5{^4_BWEv+(u6GR~ z527k%W@h&3)2Eap=z!r-26C>HS!hs&oCTaac&Qtq+DZub6!+CgBTX=CM2(0Qz2E7( zNK2W7eT$N0KJuZu6A{l1^Uvg&z1xXA~_l`QbC=*4S=M1h@P{^jWG>^ntYqer-5 zleM2Sa$%h08QAwHH)k=Nc63oah4Jx^%w#Q?Tj^)*qybwQW2=K&B3MLN&D)%2McM=| zhVyC{(UqzW2}GRlw$J84yh%v$5ee$&VPVPbW{{SE7rQB^7(Km*aPV2AKCc(_Z9 zzdyqTc%{N_Yd`IpMU#sU`N#M6_KuH_gCkgQNQgW*)ryHl_b`EcP4v>mn;-X0y}Oxu zONx|BO=0nNAf$Y6!b3x=L_g9TI~Eij%}EEzPwjrX3zoftpTt-Xv4n$@8D8v>df@oZ zRP8yH*r}usxw*Nis=@Cet$Vvvbd~(`X;iW#9J#6Im1#x6k_A$fy7?#l*UDF?L=Yo1 z@eR$vC-I!$sJ(5E(~fd;obJwc@=nLURpy4YyW;B>&_>7`DZsqsuQOH$vH?oP_=|A1 z0v@28dUZ$77#;$Bu%OlxEY`@>)YQ$bY)VY^=g`otx~AINvFE5$GBPr$>FM7VZ*YB$ z&47OV(de>g$ai~WYYUE1Oy|x)?ufB4yL;DVI7GuK|MHHFD@Wpyq|wsSQnwD~s3k5Y zus-(mgyi!onofdE#vbAaG0U^l%Ed}6V6Yb8=Qq?Jz^P{FK@)`u=QbuGfADl>s(4!Y zjMTAZm{0Ql!v|w0NtvY0wNY^x6*ve59y66vFR2NHSdGm8{QT4UkGE3Lme-hmnM_5! 
zS%+&J8686{^L?qgZ|>RI<(8JZ?u`~FWHZ|(%$uj$Yz%yS`Dtn65T)QCbDzxb6gD@f zBq#SNv)uQm&?3NcU(LXtz${;Ta(I~U{_GGur+r5UOe52RnQ+@U0wf-=^dN6TKmL<2 zRoun&za|ylhvXdFm5(7mYPup_WV=xN6(taOmT-mbI|S_UpO=Qb2Y^-l-K`o6`DcVM z^X=8aA5&6WDGnhu`j-pXbnMxxtHx>8r~G>;#WrV~+9%*vksrq7@*^lG#NqFFd#U87 zNgM;iv<9o|T(rv2jSqEU=$~ha#S$(s%0IO>e(0EK>%CFa)?f8zHaBwXP}Lc_p-cpt z9Nr$_z^^CkuD%m-I>3{px=>4e8m_}y<9rc#!a)+Yok^5Cj2RLpYhUyPW zA;McAycEu|d?QNq;eY;JZ}@6e4lTGf6i|J6fGZ3Fa2cXeqnT4-BxS3t-)b)X1z<6;mwz#lRbl0`27nWtDi3G%2@#4(KSP737bt26p=#j_RU3Mby z>y5^gVI2^~S`f-wB&n-nVq!AEyZ`(^x6x7#b1BzX?yT)x+4b#0QpB{#rxx|@QJi{< z`Xs}--4;X*4dAr%d)XgT*3{IzemxsR+I}WX8OvX+H2w1NuBSv7^1D*)Mv}OkC@tEX z7)BR}V|E(e!{^B`DGGarhJ{_(Mv|qI-oE__HUjtW-zTM)HZIxR0fdDU3^$mka*v9h z9gnzUntqQ{PFie}ifF`hca%Z|dNx7D)YzB-y%Qkw3tRAjUJDL!1)0Yy6@6m=zTOtU zKzHLpM|>yv{ajq&3QCrDr9TMwVT{l>yDCZmaA~;{q!5iJ$;im?R3HK{yT5cFs=Ao6 zI2H>y_a{EME611SA3tsOJ*?F9)knG8)aA{ns3awp`@S(7&H3#5MS`e7?#7KXQT$^= zGRZAK%KXOLYg*b9-@FOsGI8_$%~w)c*yNIp#RgBRX}6+-2yX*q#r6vN#e>lFrThnK zWNYWZ=7HtEjkExGgDr#&3Rl=dMF^X8`+^)#VdmU4747As!bLj46TYuQFZ8)k%3jgo zb6s88E83lG;GU-@LP=fJOw^)oYioOYSH-7oe4!iM9606rfYw1z@T2k9YlyPZsOoak zLA>MA%`n?}JRUC~a^$zLPQ*+UOK)vw=YdpS$j;zs!qu8j zh#XO+ms_>2wd#YZZ(!>LAJ_NT5h7YgMp(q#FM#lHq4tbxuxzixGSt<*Mpv~(kmVg9 zfIkq1`k!N(+!@;sn5eo*TUeuwLRw?dKSiKK03*)s?(T8X-sAGDAxT1W+ylgASW?6N zAzvNa#VD`3gC8W1CS{P^(+fCqFk-s5^0^VMyvPGT$vF_vDZ;Lac;LqN`i zF(|`4Jw1nqhpo2)y7dR}C^>+pprxg~W3He; z=6=lEXLr=A?#jwaO-z4Vo8*o(Mqhy5SK3llN=r+TqUyy90G*^L6gy{OS|5Nf`!@WW z-rrrrSIvFU^^4Q`5jS4`<2~u2{8<=~*qUZZ5j+c)Z^V^A1jkpv1WJ|?^G;3@@r5=O zW%+Y>RG;h5h=d$KuqE2#I%4@#Q&L(i)Kcfc(|QKZ=(KjTj=i>Jckd+9e2FKsN{AlS$*Nsv)lWK0LqN9osL2+%2K(79UtpYjMJlvn665N{ za(0VjLZ_na$G1ab0q3^TX+T3NgLbsAj@69DxN|<^(b3Uiz=*;70l%}nGo)hY zIM-ZFLt-ky+eIrJ@Zd)`hek$}PoFvSkgXyN3je5G88=YRk8VzsF%bwS2P=ng`0>k3Vc-GW?n7Ae5UkQh9l$+t* z?z}OuubVZ3<`2RVjhc?cWa3Oee~UZL$-)wMzVZjh8m!#AAv?dbDDr^OCh>aN{2cyq zOw0hh8O^mTheSUP4B_!LU%To5J>_{XRWCQ0nYpCJi%`0R&xU+Bb1Ru`;8ga5Lx88L`6mKfCF6m9+n)- zk4crwhNcvCiRxmJAt6$OW39=p1GFOdV1wCz05T{P zE0;LHkLo-Ki0YzVu-`MfO|V#HVqkb(Ed|W`aelp%Gy43wq07oa~j5X~Gwn3+|L;M8$WNWU!ZoGZUcTW4oj1W%gIZHD~*GEkrg z1gJI(3kpIMY^hQGjCMlU#g`;ctJpM5BnIWfp3eg9ro`Pe{148pG%1M@ijni@TWV`r z0P|&6|DhiS*3qJipN$S({6S>V*wi#arkig@zVs|*c;AXEcEjtr# zWItQoQaV(SAc;5#A~?Em8CP2%q54fSlyYb=rhj|4moAS&;_nQ8{*CTSFEZMv0gyw( z8U81cCrB&^Ow5_zsQ`pQcE>*(JmS+ zKOrg;X-00P$AZE*fE$t(V_*XACj%0PBdBT>&aEHWLRjnV=b?;$?Cq^`nw#4eU_D(I z6_r-N#Spx2VLTtEV=I^?eiZ^J1_1)z51nPduB?0%c(jKQmSG%4I2nv5H-R+8!h(b6 zHrbm^2O)t`kWMS!QM?z`DslfB;lhR%SkL zSn&Y7+WX*~hbf-$M9^^?CUj7uUQ0W@EIU7hLZ{$q?V0y3gU{KAFTIXTKhfmQXGvIE!sIp197)6O=0*#Ch*-x1QT zL41WO#%*KmzM^6?fK(){1|&gaIr3{ggMbu9w#io74ULY9FZJ31TIV9f0KEt0ilWru z^eG|n(6AYTSH3pKI36tOdM#|rG&R)JUjG1sJ~Apw>n$8DfIP7SZ9F+W-8|_&_?~zM zXrE%Ri2Y$MN#5p6;HqB!iSm1CC-4BeO)k3M2d~2}Mkr23rmhVLrG`8m1to?T$X19i zU%3MEkGY{BMdcSanWJ|~NlAgB18c}rK)+0w#_?;ql9l1*vN=szk1{ei3O;}WVP3c> zRyr?j$r=i&SRRq>jDHXh1jN4)E(_B-tV{|&rm?2mO68V=n{ame--5~dVVp^lskSG z+FMuxKm~B;z{tQ8z`G=DI0EpnO&1f&VW&>(`AnYLob2rDSe&79Y8Um#*kFz~287MI zT`)fa1GB=rX!JaonqL_2?mg{AwjYQV?|>@=7zNSi0fh~eOtrA^g51@s`!+jyS$bha z>YFY<8CpE-L#xt-P-(2qy?g4Zsy;qGd&y|d&xIYtfY8r0Uz_Jvo05O?x@o$%cXiEk z`UR&1RgWKX_G>?Fz?l#@-Iqo8q^(Gx=C?&8YwvM02)u%~Mg@Sm6f^Ueo3pdC%vC#S zML9VwLD;Ukk(87a9v%*q9<>JTFup1+l|e+96h_t=!=eERm!-~>?%Lo7ZzLGrpS#V| zMux;9@H`1Vd6@JOg=;hSIcr4_V3&Cc%kDXvWnj1K$I=%B!rM#jEb zUq%KsdN&GX`H$iVTRttK`1AmnS$FsJIB#yOljyvNRfxP`w1KeQ1!2vsb;^}yQ1=)Z#Ds^}C58qEtLp13e9AS&omE(0zRXy3<#$MB z&ff~DpdbS-9Wt)mz<=6OpSIP-ET-nQ22Zfwsqi; z*iR@z2Hhs+&PDKB1S)3|f^_41Bt~;6x|yk|sj<62%0D&2W>6#P0QOf_ckZx|LK3>V zy6QWkps)yonnZ+z7RFjlAXo3Tu$Nr~`+jy87>ZO;acLQWtBs${^4%PS8+?-V!Rj?s 
zBUxbT4V6@oz#TVbvp$F)o>@n3@wm z)_VTS^oNDwHpeeVFPIwc%b}P=!Or-FNFv4dE;(z2>^Sv?Kz5uu@uegIAaX`V=|trV zda%%pX3o{YN+aP@n%;Hd)@>J-y!`x-R5>|0QlCy=t};Q}p{ty#jE-h*Z?eSvexXL%z&xeVmo>_FGq@^Vu- zw|N{2eAt28$&lb^$f75fY^D58xb+s1{$So$rs4?myUjv{e!(MyzQNZUJX$%Q96E5| z+{={obaP9~%&e@@uQJzz2kP3{ZoPZ=t~W~CQx^ni?d>6QqfmddgHNE$R9UJjkZe`~ z*twddUm%A#BafbnZo-3pzIXO9?%Y-o; zcB>yND_@TWp!`m}6Afbzd1X3ykTu&6E0_Nnj_(@E)N@1kpipK9F+fnROkaZ60jeBR zTs)K!%C5(b9vwPq@GQ9P^&@tWWM=ogxL^dL#S|C!zVBs!$cr1Kz`evwsMIN;W0MjS zZJM#wA3r7}C)Wq;E&V1h^Wo_t3`W?J;sFi~S_e4)_kgezOx~B5GqiHOe3_J@1+1dA zGa#0Cb#+am=Ru4mTLQTX27`%=Ypu$0&x_^#=Au%%()8k!Q?ePZK z^eE?YZ{!0BK=GLW8ITW_Xc9UKihKB$GZZgFLWW017(?`c`cPFp?(A6uGL3&emxh5W z`KeQxF7CMR8_Uh^5E?%K-+TSj`!*kdEU+ZX1u#iS86C^d&tEoM$5yHxN3`_fX`ADp zmG+w(H@t?LKG&IS`S$V-QC6aD5N5d-G{o0WL9XRh-j5yQkg9p5-9S-k+3W-C?T# zt=>iF1%)Ne?!0gBOs+BE1$&URLw)3re*1hmR>ReGXyRSah9#xc^X1aby!1{wc~<1? z1+b#-*YiGHrdfCAr(9c#q!smE@4J0Cg+v6Hq0dNd!P}esdR+(`Ppy?WF+J%^~@}-V#*9!u>cQ%h1h_LiET z@X|lvisSz~Rsy{bO$I>-h@&k~)ilf&*enW`mvYyO zuDtKw0mrZnB*Tt=j#}A0Kj!hqZ2Bu@3+011+-_@*w(}f1Zvf^80S>SY9}W`M@gSZS zELmp&PX^%>g|uItMraxk31gs-**x&z-lazhqnCt_eOk`W z$vJAd_1aO>j!nOq20@Ph71Iz)i!cpB&zc^i#kTO{tpzR;4cfONc5+HeE<1nCm2wg2 zwZKi_xfEXVyP4JH%{$=KCv_=`4_(}MNrai5J;CmD)c!G-&n+!P0z?#}zZN5o6OTtr zq-A6zvbC<>8=NR#IRf1yr5G`m;r1|nXQRcse1tj?A{#W>VY(MEVl-I-&zY!0P^tC) zuEa~6fk_m0KkuTn!bRXIoa!d5nsr~yOeOcUs54qXKL??B03;nmr4UPDw-GW8!s^Af z{ng0<=FAAKbldhfLJr*wguej5HqkvJn)iky)LBbLbUA8Lm(fD+Wz;*hXx=YMw?q&uCJ^?X%Q^; zK0p6AH3Ul58ce4183!91s*8?4fbRc-UyFi~o6L}w?f2xkGYI;ISQ+w{a)nqDL@~?L zFT5CWNW|Ui@#CT4VH^sCyKiAJ3-7o#@jIp!9ni?st!-#2PTPsCKFcfO!gPeKRllm)Ohy|~7%Zx({?kVQGv zhjRSpC0h0yYX72fsX{sRWKl3O#-H;ljERUle@EEhBjgA+Fd^jACuNeB@zv>at2e9o zTkX)MQe`_2U>-?}#*zP`gVORNN(j*r~2x#v%c7tO4lr!+F9a7kI=D*5sLO2);_ zwYj{s2;#&N`w4Z43m_X~&W1{fWV$=w_r0R%La^$w{blB3Qy&J-h2*R99ZVz3-;*yR zCFNwp=I=5DlU~+i1GsM;iyHdB;A<|G zBV@Su5ohT=SQC51WUC6-PVf|A-N6_5h1$+7m}nX%LYeY@H)#G>fIhDq+O_8d|3B5_ z!4-depxoX4&G_)J6s8zRVVvY{goG*DYH;c2zDmBO*_T+YIpl{-0xEV;bo2Wy6pU)c zMMXy9Q$R(k0=la3zB?m2j%sS;I*AXa)a8!-|Gc|H6fovbQ`iDjOJaj^dOK9>v(pt9 zKBT9n9--myo*K9OAF>YoO||=$XT+g-2VhKR-E-{h*KV}tzrzi)rJT%4 zSl#L+$_SRgWEMPz>touB&z7m0|Rm2Wmh3iXx>d$oBm z1EyIn{%Rm&6NGZdg^N@vpf70Br9(=rx25IF__&#o(G`P~|0ml$vMsk#4}?k4^9m2A z&WVH)6Y}oZ*jPWm1Mi&muIm1KW5nv-yy1OCYfw|t{vT5lwfx)E#P^63?w1p$ln4|Z z|2OL|6%TtOc!Lv~NBmY+R&4F0?dxA9bU>91%Gl@QT6_0;KYMnV`Xxz(g4GT*8=Hy* z&T)90ow;O;TUnn)LYNQ$ezV#l$ykS*UFZ2M->=miu;#tifgDuSzbI6K^4i)vj^f z>cM!peHaenL;cAAv~sk_Am<(L9ADMAwT;c|e`6Yqelrb(+-~b$2+f#;SO)pjue2SK zUZ=mXTexlbZ-CqVZuEjgZi{Gu?4IzitXlaleWg6+KP&filKsj_NO0Orc}LhU{v(eh zLxs_AsiJ?82ixz1qYcNy!!!B)J4~SNdWgw9M@uh^kY#z=99u0Pe^C;EuK}n5Y+p0L zcVk?h#ZR!`SSYt1h`ntKgQJQ1oNJ=!=qN~hmYyC4snl13a=r1zg%5pwr68h)qU=k8 zI%_oj`SW0O7R0_bmK>9F3FZ+qO`Q9 zo>qND;6~Z-K7s3hO)vbePhQmF0IKMj+?<7>mbvl@3JMKdGChWr|5SUwQo^F`#xgZ{7r@Z$0S5720sig-@RHxrf|}G9U&mfTcIcBggE~68@%Y zgq9HrlY4&>V00`t_v9$#-ns`i-$2{l7eGihT#LkP9=X%$@kSq7u(!0Fs(4sV>9YZ* z{|8U<`-w!VVjW=L zk%!=)@LfA2wCoT{^jsAVcY&9eS6qDHG|K9acyy|e5~(kKLkShVP?h|uVYbLcL=uG|WAFb% z&mt8FfEdQVBslPIk)POJxe$ZzMOB=uZvwqnoHX`=5@iIh%@tHq;@>?Q&`j>L|6QNR z4jc{#_sgr~J$p>RQ~G2jPQ>iQpGJmx&QW|D7ZkE<2CWI!!2We7(KCj;0KUC+p+kq- zzpATCD^Zz&k@1wEbp_gh{{&>XAQPpAf$xCB`>&Q(-krP$ElQwu>3{$QYY)28;ZU;D z5#FL&z4&8)F%ZIus$bVah1}NOf)F2~&^1wR{~p>&#e-16|3yu=3}O`e)vsT#sDuJR z$&Xmr2^I}Ry3{oQ`uO`@DAoIB1sN4U}jSB7P@m$wS$X zSn#GkeX{sG_F0j*sg%$48$Z>)a2L6)3wJuji^lite1!Q4;UzT^dZRlhPp2M{%D7-u zb-;{@_45{skwh$cGIX<1esD{0{Re~6`-qbf{1@Uv<>_C80hc`^)q5(ZZK*Xy5x3| zOLq=IWZ=rb{LrEQK1xgr_uf}nm`_Sgtv$l3thO(gX+M^o^9}di&)LMJ6gAbP-&?Ay 
zO<=lqf(NwlfJEwOpSSw4W5?Lo*oKQQrFFQ}hyf5GrqSd){-=5Y!??!pGJv;yu;6_GRQ)O=A&}m7x7e|F$S9nQP z%+@d?r`Z_1L#K~Wp^{mjCCRz^1hR6dc^in}=8d&4<=rThJV31>yXPO!s;^*?dOE}N z?i^#%_2E;W3`IbcQ2FuWvjpyeoGpfQI=acCek9tR1<<&pWXzFZg@ zp+Grrwgbc|cL@)Yb{6(6v`Dw@E2^HI3w%ncx(p{NZRQHKBkhbome@4 zgx-!$@5V!^RSapXQZN14;3tMAX_dgi)w!Gass_{uuAt~#ho+`BCJP3}IYMF$*S1=3 zW;u*n{zJRKe;_@0{vthU&(HUBt4`q&(j!CtKad_($q1$%mb!1#$Lz{9HrJ$%_*n@Y z$JGF8gZ6()S?{!(xfslzHYh@;n$nW@{|s|Lzg)%lHo>A)RWZBr^3?Mfp759$MFXXK zlii8ExKv@?Z!H4Ca4}#0SN(&m!dtC0*@D~Z@-rCD;cE#;{v-1-g3}qslsPSofBvj_ z6bn@z{ANT2QMB5^T-Vr`k(pTrXn>Yh7qB+aMOoAnV96%;Vgd zHq0oT4JXnYmG1ENDTa*!8CLq|C_8BP-*O=d4}MW0k?bG0&sPHnI#w53TW({(V&`k< z0}>JvK>T2{noVib(gDe}oua{21|ZfXgoHqz0}Ku@?A82ye~63CxP7>vU}hj7Ai#FZOB9ie5e^n)5&mxbyd zMMeMqtSP;@|2sN4gp>c&Rq%dM1>@S#_X>29^ZonK47m(^P*i?C2!ppc6}`%>Vgpf` zBdo<=dw)9qcQ?m2(n^M+w48))j;6&eZeu%bA?UXrU<=5%CCCUEiJ;|L1hky+k+)Kv zZG80?34-AFgktiaSUm#gBLC9jkL-ftE8fAXx9rDqm_w#MB<+wH>Gk1kI=p^$!wh2d zHPGYup~T{nZ|3EjiC60O{Np#y4-`6oTP*Y!St8y9jpIgzO42cOnmEW!1>JB`@ z(D zzTJPw5dy$XCj1#kwKT;a)3g?oMQ`xv7YXC;ffyJdAI$?}kH!E6Bb>W3&A1Y!M5vX) z4kk%_OF_De6z2UA76+jV#7P5<21@{w2el`*42?iHKkLI3eS+8Ya5o7N+oPsP>$F)kOo(N^FM0RZmp%RmQ-?LQ6E)23)DoOS&WXaeWvS);flD)|mLdi1rVr-M; zf4^tc^E{vLa(?G`e&_t3b2<-Z<~{G_wcPi0U)ObC+0{$+G!7#BT09i3HVp$1Lcbai zA}?Ni0}ei?lM+it{c8|K#gYfHquVmGH7=o=V2qSXo^>;M@4nOXyq!Do{P;mb6O(<3 zR2i;tQuUv9uvPuvYda_~yMy`fV$1)HtbkttJ| zMMlKJkW`6ONrL*R*|&o*gpu(RMUP=`Wpzh&vr-Yf*Vx#`)^_~#e6{g1sld06tjthT z^jJvm9(Y>iEZCcqs1zc*YygrF4-avQBt#vnyN7%$HcMMfX!N?-aw^f3^7T>OgGm)9 zzxS(^KSvzCsHn^qLPFYEDZ_v)1K2{zK`3;m7Vu{KSLp?cFn^n94ef#fikN8qJ(D-h zx$ZztL-_Iv3M*}dzdp5ySY7#x5&wXBEG~b0XZgnK%}QFOI&*K%cwe^+0WZBq!hk*n zT^h`x}f`M5aD1&_dHpn8sO*L!N9*eTtfG$Zd>uqf< zcs`A@n!wKb@gr0a>N{?q^8~H@dab#o)uNlFd7d+K)IPs4NE87mgfzS=AR$#D{nJ_3 zrNdl4H^gy0?{ikhv(`&5MEQMe?h!G5!AYeqqzyGh^gfVm@+7_o?+qw*+OQ@jej!_9 z$^FmPSl)$N||&{A_c1=WC4p6=+2B2g4|bi{t~OoK zw|fUL%P&xGIl`$8?neDZ7D~X>fhh3&dkgG~VRCo8&5V%^7l_5&m~ zDcp_!=X#DeQW|es@A)2d6~QO27%1tqr%b5ZswZe`YZJNG&roz1ufoFRsv`2Fn3~`^e<&%wp{b!9izAY7Kr%A z{qF*i%stafgMN+zh(!}BnEBn%36=QExsaI(@$sczu(J~g%CtR%{UkCAssW~__d!%f zhXU&)dMzHrf^787V1Vw5f!&vv;pyYwzjN})J2(jKfL!L~R)p7kTE!e#IgRoCMk9zk z=6|r>q8kgdamy}J7*U!--K&Oj%)0UZeqZ2!=9eeBvc&jH`;f~uWWT7l01Kr5fE=3V zSwuwY3#QBT2L-{bAt^Gl9Xt=~#NgBa!|CS(0geXY$v)6w)eocO^ay-Vggvs2H4xF9Skrj3_6+|am5&xy8 zrPzJ`e^;FtY5Z25D11g#C&c&f-8O?l6DNbD+b1TeD8eeQWZUih9Y(CFccS0d|Siyx^XjPJ@dw0BiwV zGuEY!yPbmCk%pD?vyhM6Ty0gLEE>_|=V)zgOg&lo4F~kW?_gY}pA-My8J$g3nI>qi ztb^b@h=M*nXgu@H`p7O}w>O9%0TkeBOv z2w&mK((8?iQY?||{Z5!ZtoMcW4wQ`yEyOx#7G(lN&2zrs88Xc3`iYwh0R%~)FaT=w z;&~GI{y?J$#30~&s4pOEkX1vA9z)|wH^IKf8M8IG%ZyxvId{MDdn(^DK?37DIh+IF z{K8CsLRI0@K9r5+H&DA}+u^k8&As*bf`AA2;lmX!7kMc|e4=@_n5`RqqN>o&{+A5M zI@Q&*dN>Y;Qqa-zaQ*xY4}?!=`myM=wv2ihuydL9G>y%u9OU|=h4%nmEvr(z{gDpk z4txfT>=)#Tt-9VrfhVk@*C*KQyv^w#oz@I|u3 zjJ31u8L%bTt#xw9!PDzv1{XvQ`j5%ki!l?<6R`6gJ9kQmi$lpi{N3(>$s&h-Ts^zj zlPZ;Ckbz3x>devkaw8;$6RBMFvFdtm!6-QcM7Gck#V4%O@yKkgVjzqf-4+KC&1=s$ zj)%5DIzpy63;v)byk|gbU45TtdnhbcE3VUK0VvB@^zpvWpSN${J~dDU+$@MHkQ{_< zln}#9=5iswL)OuOKZ?+t$S+H*gcV5uLG1|TFDa7}h}+nMqar}~2ogLhe*_-rCQ$|X z^yVQc@e+kzLKeeprd>#KgalU(dr#ij56PF=_O0h|T>i<={^i#*+eo`dVq)sPyYM~0 zY!BUq_Xq1;Tt@xNtjDv>u zpo9BZ7DV57kd6)nEmA@c-$F=k+vnPp);%h!{c24fz_@l`@0VZc2RX; zP4g2G7K5}w(QSR9+Y|~wfa~WNHng^8=sZ4~(8^ps!#F}<5pzH+QEeY=1&fB~fvo+8 zl)G8KYx^zYFN#ACz102t&i?`RzG|z?J>mK-?JBTH-@dynCucuco!GYmUWVqbz|{p6 zk+Q04J{D{76RL7t8(k;=_7{DTTU}%nnQgN;)<+i zyu7@}f07ZkQD@Z^%^3mZ3`yHA0Y^tiXVcxc+%L#|cV*f5;{vqZQWPkockXLqbzyve zKqDqMPdH?J8zQ(;gSmAM;HBvfM)32oiP-m^wJ}k=zq__zYb+jOvx+X;+P_9*u}iSj 
zXi2WZ$_*`=1FWLO*XLqu1g^de<0v+Ja}TKJ>PI$h0a6g9$c=VW>stKJ2qqZusFe`fFyZ-@6OPYflT);lI)K?$0 z{^2krp@%qm%qfB}NmGd|Ov-HfoGKUeGKhbJj|bd83Z@e82o zVw(1h+uG!QOy*fqlIogfPvfHloM^Tx;OZ_OK6lG3As|Z#!qR0Zj%76i0D8u<85rS@ z2oBor6W4wBVj~V}-U%@T&y5{fGXNKxe>!MV?!e~LXXrHzIsaGl zA$e1)1u!8=-SM65w)DBLPfS$wF*lc%*^26L0B<<>uoRz6e?M!r&slmdNp!!)VT4|7 zLz{gAO6%KQPsCw%KEL4WyAJ*}sJjppdU<*lRlXzAhDqsOBmB2_2F!`Ub*=X8!ITWq zj?Ftmfuva1HbC!p$#QWMtA6)U?!hcInho|fC_I{92f`RJ*6WJ@H1LxT($mXEG=3Tu zdVjp}H{_i|9yt5H?Uvobmq51V%DDpT6=+pO?#&dgzDgU~p(E`!rLU?x`stKadfUh>S-qXWFp&ylb5OU1_^!BqqlzwmBMy;R0^7)IyEB$Gbl(EqVMO~X^ zgI_%~WW@C~exs!`SI^kk*x(#uVPst&AXUpJjDgE7E-yE&_O3W0eez^j{;7Xh9vRoj z&1#oU^YQ37KVZm2tAh34Tabwl&on3?5Nq7yAIW^8L}{GW_ejTZ=F_Gf{iv`?ZKNF0 z2DTjFl*?y`dLXtw!e%@wEm7_wC%2!goGyc%g<|I-LHK}Yum>Y-CINJs`vE4bFKqyKjt_j-uz`h zHc!C$UtRG*+n$#slfCYMAskVht32yJ!0?_7xcK__?<2~g8o>rufNNsOKZwL(bJgc8 zPCwD%+&)JSOZcrL8uf~02-hU^)YHBOwv`$0NLOqfL}d(>sDa0;*J-c1M+d$XV>J^Da3be7b$;V{`Nf&uO7e@6TNILU*^ldjc(dfsTFza_MnJ{H< z1uX{=ML;o&uT(*c$%R80kg=P^&(O6LP*12#1)Np?1+Wq+q+gvDmspD2LDQ>+WN+ zymMiC$5O5=?=o(temJu2p8lRwedx~K=Cdj-dg;8Z`zRCB<^6Ad(yB8j>`8iF_Or3N;WOi%kvaih8ndCt!m$5G{ty1nY?)#R2!FrXC zc;T#wA4Km^o7uN-ANVe7zKj=6J?}c9eTpSJ@Ut32+dV^(Zqxe9wVfxPeW(Pd1kFsn z(#Il(9{!#=FDoj*9WF;t1rRh))pX?^q#3%Mw4 zJ-rpH%Mx|O;piIo2*_|>fZB6n9$(6L^(ZPOMFK9B@I`GXXbT@}tQdFRqe#3BC3a}ou(rU`N3tgd1O3BU`(qM!rU;#rc%jc)ID|Bk zpqhbBz)tMY)&T+ikxmR0C>Y?~-#F(stzhvaW^fgKPQXfzsn{)nZxd z#ZUYU#!+h}dg{=uP^%W*P)lFl8%J+~N^4cmDa7pYGU%&lmrE z;v0BTH}`hD3au8KWs|s>FpKF{HOG6w>mBesNsc(d? z2M2aho+pxfAUVh8BZeR0FMpmk`Df&fAr3SzZ|p~v3n7Ps^aEA!dGlQVeA=}MpYD=3 zu&q;@+Q`aK#T?jZfB{C&fE^gTANtI~KPU7a#7-S3V|uv#lo5gokeDXOuVtrd>dcU$ zUz}YF1yHRHiuDkAmdz3td@*3_Vq#(fZVRt&eY><^-#dhr1{>k;p)xb0qtHjI7PcAt z-jZzR5pWU((-WQc+waMRB^Sy)*Y@1rTa5uhJ<5D(ZE3Kkw#@KdJ`XQ%1@JD?6e0mo zGSLEm44(Adk#co6x3WWvz@6>{BPq#tNAjhwpJ_9>-#bQ#aa9-5x(6Azr>7^bq9T}G z3I|5g4YA0!6sD|!lM7U)>X%7TtamD(j=DjK`y3HNKxk zR5@3Sv^ebr`IWPK0KS^eHHPsp+X-&I!%R%E(a~IM&gcjoz>HmfPlelbJS>KI?1wkta;LymjUJBxDkNUwtkuU0 zXZz6)ny@+IeV+<)blZeB4n2T0** z<5MHWz%odE3ewQd*mw8tZRG+BMttlg3^FxzbxPxPX;4i0rZxGhf_iB7{*?BEm*12- z4$e$Z+oRe1v9_fexdl6Ena_e5Ii|9KGatc)%fn3so@cb^CUf zCJ#s{8#0e)LU9n3@(^`>bVh#zm;+jQZZw&$rwTj_2mn_|Fq{B_B4EN3O3(k?_uTs! zzm2seO+$VCr$y%=lU|5Gu#x)u`h-?7LJ7cWkGmE5>+n9_-cRyxsPkiC!bW>*G@?L! 
z!Y2~0292-veDZ^Ix%h_(nvXoCaM~2^AF|5_yFi%+Vpew$q$u#xeYl2k)N9;!0nkyv z{L?ph=tj1%&2>S$5g0FpH&C~3y?_CgoNVgsToj-D^eJIzDAi9U{S|)z^ts2ZUb>>B zbmkl3Eg)XA6=Uopv9j{=5A_S?ZbyO@aZ#J=sjQr?vXY#f&a?Au#_w8MRD9*Y9$wxf z_r;4_K+!TuzmnoN{s`~wuL@5yQCpvx9b&_zBnNm=Sj88#Eu%JheIapXW=GS~wh^dCk@)`@VGWEnJOlF$@%((p~BAOqBzb z7I}MlJ|jUIk4})8U24>weafHAk>hlly0ifgrV`1KkuD$t2LuXW2qdyN=eZyuu>_5# zy513XHOs`#TWs(qp!Yv+PZOO0Mj*0J1;K?5PfkvbkBfUR-&z_)!}$?~7_-)ANV0S<&vDU0WD>va_oZZhHw7m>&aYW1)2*xCmj#9-VH zS~p{zizmWu93bh27w+_aC0z3DDi6qfRnfEdt;EZ>Mx)zU+tT0XGviV~LE?w&Coa)~ z-2_BVg4@U^Ea$gt_5C=j8N9B+R2qQI(KrgMPpEAFVsA7ibr+ zB^Yz!$*;gLVeRLBHrj2ykX2|@b%XN$&A-YYm6`0*Qkdi?`pHAANOs&q-Q14IexSkW zgRe;%D{!no#SZAUZ}ap;N`97nVFeDYlo~2 zO0?0dpsNDJ5korh>WZ+PgQ@NFd=ARi^fr;aH97q2 z+e*9eI+w%WB}99RUs6pvpIryZ@!Zmq-VlKG;3xwO)q#{Sa9H5|iN~P8Wk32QR8(zz zVUorRPXg14#|f-$Bs?5T{&@*-(2)gz%D}n^mYrX4sA%t>mOeh^#l>24h|?Jp)7Z=m z`@w@HKnp7kd;9hhSImXXcHYA6^79MAFYu;k4PWZ2{C?{>Q2}QkoDKLC6XHgnZ%^VVz?=0rGfYFT*a=xEqYe_Uzl&HI9a|I&j)- zjyAd&Z!VV7HR*pZVsP45>-DgCwNmMFkF_iLb?Pi2JFCl{da7IVbDClEPf{Vi=q^4dCW}jnPhY-&mpS;kq(?+^ zs8(yBj4GR#gonLKmv&-(L94=_7juWi2+e8EQ=_ohz#&$VvT*J{r_mqZESWw!!hsqFn3 z@~bzl{1`ksGBot5R@Wodq?nzmbjRVgGo|6~5Knl+=I0LW0;{)DWnFxv=+wreKY%O& zR4I>tOv$}C1Zun46&A3z+e<@Z4NMEA{mHsmqwpsgM{4eLxzboWs^2=zMu}~uAQWM9lTFle1Cb_C$+jHDEE z4PTP^*_P?~j(FVt688ZX$8dq@#wvE7x-6z7HWAXhhIT?OUk1)CYwI1sBlm>ZxkR~C zUBO9`l}(_w+tjZL+$|65pGlB8=hSjJ0)G}WUCS~__i5lh!)zEF$d!2Wp^VJ!um<+%hk`A(Kaw#*GL zTyR=9YHv|u;}yf1k3T1o!@Wb9MvzbI8phPy@-06-Q;q81rvmgw2PnqP%0*`E@P-&-f?Z#K!JP3dRX@2ZB=b%2>wT_*!@5x~E z(Rrucy-$bg#V#00_4qg3Vc+!k!|WczEN;Xe1E()f`x6nU(Um4ync_g)`OrfG1;|O3YLg@gC{5U_= z9ZAQ_$P_OdA&~sWaNGJ#-q9N5Br))z{6V$5W;sn#7D~ua!vwnIOWeht?e&(A|Uz`>_CjQ;Ar%3MX8q6&wVUrST@={hM0IsA+ zyDMmDXhIP{cAI#qfSrLi*e3!s2kY4EA1~sz}gaB|SaQCcM17h3?+>=?L_&`Dq z&Ra;|fd*)XG6BLjs58=b)UE2bWD~fF;bk&w#C8=W59oPkUO2^_HVpoVhqXCDaC#3y z1=Atm=Q1}3GUwU7;0VUWKuZHp)h7cIYMar{!~K}?i%7q#j5pc#_ijFEvZ zg^KF{lbHX6rV^iP{A#v?{hDI+vs!bFRu81}4}>3D#~ccxDH^R3Jtc>rMn6 znRfm3FILf8Cs?7u(v=ED%^`sC1G!LxZPx6w)mXP@kQ~J&B=}YuwmXEl&A%t>nnu#7IMAze)mDFIiJjd^s_W|7r9`D#VixY z+bw5yV0JKLpX^F~)@OdCA-8U%tC2W!^3>H_>#M*@mt%wlkG}vZe2<76J$lguD0YSw z!OX0^7Jd3N8jdHan!kjNcQ$qn5=>6wBR8X%e9R-}!czJWhOgde{NmCn@dZ>QV@zdvLXJ^N_5{n9T z&j^kX3`g|JF}C(gG8-#UF?r^URkE_m(jC+8z&E{zDcTrneoAz06*wJvFv3797TXOA zM0^cX$?mg;aqyuwI>fllvMc9HzXtv#-*?3oTTKAP(#tf}Jal}uKd@(ik#CVvZWefR zFE_f9Ntt+@yix5dnVvw`o1Lp(T~N& zm}Njkkhu{Tl3yD5`SWK!lgaB6)c_2A4&3!@Di5?vijp`Gz`jafAGtWGW!Q4IM%h_{|dMvwQm$xUrcNnhlvp95c_1i$5N8l%&B&~iZ(1Tz$f66zWoCMCbE+^poquU z#v9R%;5DPOJFr`uqD5V7EYR-S@9?>&^EB5M=ijKY_`hH=twr0yGF4pQBO|lGdW>%j%#@ z<;S*kF+D)@@;&B98|i~OT}x&Jkf<&Fx`iX#nQxVb2_Kr9k%V1*=?N}YbCJ8rSq!V3 z+T^bFfZKsqzr-rMD0X4?gTOCdtY!Ql>qrt_$QYNkM0~1M?d)|G#xc<`^h{Q{DuMieoN2wEs^8ua;d~EGH1tNaPubvEGZ^O4XrM`-h{7gKhk+Kq!uk#4sufc#S zXncwD`Bk>6?21NfbBB)TTyM}Q;4UD@x45IvD82Y@pLt2ZOoQOpqdTgN| z7d&l=#C%x3uNUrYbM=^S5u2@?l^(IC1AtUV<|TNOE}Ta79UvwPlHmCfXp~hOAT=ne zfzFEt^r)hzICLc-f5F|60IQ2Itp^++2lhCO(HadQhJiS7F1R!A+7S~W*TsbepsK3T zvNZrbV*jpP%di>$Noq4NeJLpjzz&Fe$UX>+4p36}844gFy6YZNoS^EkMg`zb9JYsQEk0fMFf4zlmq`X{HLe5mUB&4U0xOrC{Q-Zfit!`YdF%dZBh zNZfy;Zd7A}z!Qh>-obSHq6&OSpD6JBnTO+H@t@ol6s#BxutMHdNN9Fu>lReOQ;cbq zr)y&rFCc`Fc?0JF;ro`_>W#G8HyS|zQ3Vetp3@0j`}o{@bGx*>xR@QBSk%FI66mJ5 zZ7fB)W4ZaG@b)tt1=8IVI2j>&BwO9&idni z+pL1q75BbiCN4m`<&f50+p}^IbDaGg(pOJIA}?%O4aTNB#hr792#FG|y=C?Z@x|b` z*HHK-(u#}O#HZReJEphjUwDFn`6NK`(7>n1ebDD-Ya=AZi$8u`QC4o|wE#!yBAz&R z<(q}5+SP~{05k~W1O%2%3fff-!4EK>n2Cfa5 z-KoM*y9743QjCCx5_B&7xwOGS-j_nSNh}i4PwdtQJWe7znXn`h3=ZMpX zjnQVCJipz?^f^8fVRF)_#@B~lstke%@Z#Y`DQZP!P352BX*7$?4zieZEaAa2jyqn; 
z4I;|Gvg$g%su+68CyhSIbAop~4i;Mh5fPn!EqTPI2gpbZE&y1ZGXL-w&DL$(LfIrU z%zbsIIfV2^yQwkx4BcKNO=atokZWbYUR{&a%o_!30(+_3u zLpcg!pj81<2)Oy?{{8I>U-JCuL~!?XTUzX19%MOuctwSmt~p+$30)}9Utm#^VU$(} z!@3Ik&yBOsL&ITyhG<}Xv5VS0jZv6nN+0PlOmw_dOf*WX33RSk{*4I%2L8Tr4#FHL z&)~p)0dRE0%?Hwr;C$}66ZRc#;d0@ZXoj_*!V~rt!em%z86#tsfespFY6bVvu-_!G zETMviKKC%T0xtzio;YUamK8$ytV14fcO>@1*woK+V@_g7wrQ=G3 zC*9Cf2m%LKMP*+qI} z*W2B#7id&!ldh^Y9vc%GiKzoN<;3>1b%hSXZbK2V;%}OH61kqBkcM=o{T(y*RSZ7f zA)GE*(!JG6oFvutcSS?gdM&l&yix9a3mUzq^7POdz9H`+sc7~a+{+Ldr}>pg+d4X$ z>g#7EbDr)_xwwr6s0usx9y$WOGxtEduBe{tD>Ru`bv5wUbj$kq``ylnUAry1r9xR4Tw%ceG_UGqe8QR+K65;!Pn1uhek*NK^6(aa<6 ztoo^Yk2r0@I02%Qp<{5DqV+}`yCzVJvx9?R0}qa&>B0&$0r3da zz-yKzef-_XWm}5H4}3R#@bt{59O=+}Q?51t{0u+xGQ91MMN-u=5`{Z_(GP_Evr}Bg zCrZgorHNo$P}K$XJy1L!8io!bjl6tfVoM># z1|oA=b=;Z(E>p(|z2;itseJLW$}`OaEk%_Xc{_nd$>XG|8ZzE0#-lh{yffkB#Idzb zko2a%ZCp4$n&)^Ah{jS#MizfId{FQ7D;1f2BZ!*5XA%$GhP*oG9EjWfosT|#Tzy_fMpMWv86APnQ|Jp|SakUV63jh0Y_2^WEL4 zuwFoTTSVVtumbf;aFhbG^lZ{qCIjH0M9t;zGKJtLA~KT81ej9=1+y^w`Nf)*H)v8X zAuA^XHjq5u@!^*02p3i^dGh+n*ANW-fjO7smlrg1SrbnTej}7kouzVpgXhWwuT&=eB0@L zU<5op9v@Orxtx@9o31mbVqJopIbXV4Zh#G}z9l4cb@K3yMveGVPrH7cYlLw0{!Jo` z?cT?YA3p5bvqy{l<55iu+&t~$Po%sz+$FdVW~IVn`qs2_2742M@Lg(qJF z%BAL@#O)tu(Cl8wv|8XNY`iB5Ne*%Wan`5;^}d!bm*s4)qeE>OuA|eU*SuPxN9vSj zBizjv4?rKO_yq)5CEfD$c6%lV}3*7#ZB%M&_JU+xFM(+X@8@N|{5< zyZiAx1YA^RWVDAIy8%%`k8$QR_*L(rd?8@0V@52f0tMA=cQ6#D3gB6^|2ptL2`mV6 z;tr-S{~v)w?w8e{?<xq!0%8ljNZJG8Irb})IExG0eOWtd| z-?!0FF;yWCN27Vv2v)^iO5f)mQnQc;P>y^p-w74JLj4$jeR}xt8_QI!CdH5Ag`(3c=N#^p^Br2@Z*kvV(-x654OJ$Ebrv8nZrBCcMrEoc)5d_ zpOT|QjV#rbKU-uRgV{?px_$h8o({E08p%{(4U%n76cMD5k`RTLVtWqfeEpo$+!wEn zKAg5v|m4dlVaBQ{QWffRtyG{6b(&PHf|oIqT(Ft2_kb?7~D3iO4LZLib6;E z!%R994)+>)7zY4TrLbklz;;z{E6*K_QMWZx;v&P9Q0_v0{5>qlB`@Uqh^C=go~2{b zQy?p#-m#-IgR!%&no{Cxdd=nqp?BfTt~?ca0HqP~%b8LBUcDH$O{W%h7S{eEPh*Wi zKKC`nJzrpD0CO1t)$#;jt>|fQFV4!kVtiCcNHuuRo;_0M&zmO#(Oxy~G#3vKC>){T zP${r501P-?%neL)(n#OiS;nA^?GpbzNk%`X;Ljygy+7v(D=8r3&;rxdBU--!I%-D? z@8E)kq_nuW(s|@@(MxD0F4qHDW59k6p+RO?a8svPMD4<)^!sH_*gVE4mEZR85Hu(P zc+F_sPl;VR{N_n@rg=jWHG3gikX>1mYZ%;C`EnqVXZLAPzG>SxGY z0B?xmOvf6QHd;l}K0ZxwbNvZ{I8cn2fNt;PbRf8kPcc`H1e@P5w32E`lEkCR|pd*dY#eILiW74FjSAp~h z`f1R4s6bNykSG}G1OiV0crtSB*hmsmt4!D=&@`nj1bv#bE4+Ew67GF?(qS3jSL4x5 zWmo*@4HE?>yn&R1VH1+sZ(eS~Bj+{=?M(PO^c4T%;1FK(D?GT;iPQkT61vcUvA@G( zmC{jW6(NXSi>a6s(aOv$g-$7D^rpG_A;DolbphFtFF#hl2!KTN@~3nqDA8KT2(gcU z`=+6Jy;N=%wq53gUX6%Tgz*c7xYvtcj;W zqHedNAp|OP(DLk@xcK3PR)kqIEwetz;h(6>A^9 z&Rrr=0nb#iAV!X)=vSUo+1FgHxkub-=sum2H7p8Hu~tEik*i?Y#9ioHw@$vCR@)A< zOqD^nbL{wEfIc7$oc(hXu63};(@~arJ7f(b4`s1-E%K3 zt)P(5J{p?vccS*aSLpBg`|oFyFjv0V5mfgFrEC^jay(mNh!~bq(H~oDzF&^lsdlP>QU@5>BQ)0i&Wjl zI*0~H?Gu+^<3{8_BQ}vYlRPYemsj(vf3`&N7s%TH9=|FM^$1k1>+Yd?#9wf6QMA|G zKU&TlsH0JcQ%r1QY? zXo4e{y{-Z*^-8x92DvAM&w_xPGZcVr(}*V#I9m%_N5HltcLn0VoPfaRNMH#IWE2hw z_Nob`6U0kEVD(mX2FW~72~V57rt&w$g1bf4DQ$438U?siX9)5kTxDeqNPFm%EP)H0 zro6UR3awE93PKVLn0;+B8?U^}CPMN%aR@Evi6X$i2rWmnCnf90j~~$43Hq>So7maq zk!B#mG=HkFLh8n_986y@@K>>NE%J_2lug@)81C5jRJs?CuEqjnoZXE9^hHuyx|+_1 z>p@mGrs)XeEwD@?vaSnFj*Wq@aNg(%Zf>9@5@aBRi{|`-$7}c=&Dw9N5pI*k6r5$( zSB8a|p@`U)^*mm^6n2iEUkL!T?x=z%fCDHjpe2lh7zLrZc_Kg)uBdJsi25dvo0c(9 z=sP71JLN3Q&B6JRH}oFt92$es91PHs4hV+PjY0A(g5)gWB);L;%qJO5OQ0~OD|L=x zVDa6gRd>`FYG)My&k$HX@81KOm0n5SjrwguTBR_sPo9$6>axVKF%fqkc6U8#8SO&Z zboTuh?=`wC|G2giOj%@fT_7y74@;04I9GMd)$8l`p|c6k*z>Z0o2xDk+5Uee`~cQ= zjwC!hBU63z$CF^(Oai)JOmy$y)Z#5JAO#0BhX0H0v#7fnBs>q1z)3h&H;iBWc}O8! 
zb-%9R$1Qo_B#n?{JLpg~Ve!@YQ{J*`LXQ(8uZRFQ1!2tWpfF~5IDgciVX7u9J${Pg zlc>be_Kp4`l+qhaUCz2l#{Wo(=YSI9dm?dkvBl`7?y8ShlP}BE+v6LxZrlDv+mTPy z9W>)8>y+H7f&^*vYMj=oAf*c zrlPzJsk2hhMEfW3kyCJ*^91`4a9tepp{{ciI^zcJ;$Pvk3 zfSZ<(AX9+e)$Ph#VgWD&h&&f<5u(r?+z{Z2eFB;;T+8dfW&wH|z#56ULF`2d#vSNG zgs#^3_Ct*!5TscD0jYU&`9XCQuvv-3*1Arxa1VWoWZ#Vx_2rc%-5m+O-xUTk^KLG*7N%R6|G0-o|n86dh$9cNy7uc*F{(y zffrK^ZqZz(uMA8FK@kdX=3gr4Yjs`FV@4dFij>r_u z`1ehzVs1l1U%WNZ6?EUwl;Ib|OLWxK)VVGb76&qc^;?;j$LPGe?8F7yrKjTa1VdLI zqnG1<_)_O2IxJC4|L@s7a9rq>jQB{nO)a~Ow`)UAJ#x|ojRwky0)&9N`sEJBKyFmc z+(XV(_4a5aO#%&9!lr{~VMP!a)WDir;FH_BfMXIwi6Mlh6kX@s7ocK*G46!UnhbG> zyOF`}Zs>2Z-6z+*IqB#bVL!LO$S>#iQ+NzQol;O7e{t}?C-qRXR=N3O{NI5l;klHo z<$6C?>Qo6VK%2~Z-C8rlwfE)V2ZAILKo=z84RY&@qk>Ex1=?QqCIzbiZI4U({D=y9 z5u(oG0u~Vc%Ita1zAZ4d2S2Q=-y9|H8cJ`-OD4k4`961(x^og=;O+b2^;>fC(!Uib zH5AFgCa%_}fAKetb*CGy^IGAVDRPIR-j(sDT zxq!Ud3s^kxnZ%{#7j^D*3i9o9u{U2C#fB|%*DTzONY?~1MriU$r`5FBA$rSpJsoA& zY4sgcN=J_!mpHBojq1U$Nafa#uNPN35prByB73kYgWBo%(M1m%72`|2lC#0(SIAVG zM;gNCC2fd7k(v14M+pMz5BWII@JjZhLy}zU-yAcG_3guZ-)jD*xnfencOF~<}r5?^5lB3grqQg`nozq z$ZtfvC6F!52?1#kiV%f3y7>$FMI^(9SBQKW;9l2hfr=gNHvX0s;7q08NuGfSZw^RD zdL4M|%*;6wo2_KgJ~KAdSlfehWCpB4mn*SmTBgUb?hwZYBdse=zEuTx`Er|KRpF=h zzPT93TrVxPtJ*Hd7istAJk!z34N9=OUD2n5>(wOb&x(O9OA^!;W@|PEVivTsqpM|E zu&+A(k`HyzQ=kLn+~S7`HIoDL-%e21z`bQTU8FbL`n1H(qO!F=XNYij$@R8~MR5F? zmEf8yA+%84d^iY-qkA9}9^DVU$^oXeiNUa7L1h7(DEYpmHA;HQCeAqiQezf8xv(te z$GY_RX7On7vjn2A_v>E&n+N@LPT6!uMhw{W7Z%66I0pPOHC!*l%}s7B9}~z?!*C0^ zi@42@ZZ%Ixp`urV>svRzJ)lwXiSM7DTFq=Y)A32Z+3>4VXn{Wl6IDFrkKPv0W%$Ob zg*HOBwn_AbU1Iu~1-&yl(Gx=y5s9lrVol8k;=THd00A?&<8`h=I=vF*Eh-=&z{x4w zAOrF`Fz4$3@+JO{-sS0Mbp?e6AcI4`0enix9Y7OksuJR$?tz`Q5y=7GJffG82TU3Q z`z*T^Lk)N(K|;QB8aQKE;-DE?ij&#Fb z(uVgl%TyT0QV*=&F1MS+%h2j6P13DgX*}U2S417+qoCvGgA&`U3timS)YU^ckLyYe z4OO$T$;mQzb6W%!in1gLfXnU@Yb_BODAYj!y$K^B5*sKEYF0)QkT_B*ih-ou-aS9sNx%1~>4YHj_%X7HA?__t3E1!3Xp)u1S4^ zgM)7^Q#8mAn-3^b;N7YC49L_S_+Z}`unwP4eWre$cMC5>vMHjJ*$R~hexryozuq%5 z4ZpnPbM*WPZ8=@##b?^_-5la$6W>IRIz4!$n70uwm(B6;zN_Q#m&?5Etpg?vOd2MB z=bDJ#W!3QsCKq!@Ek(bid@gd$4@2a_|4N&=GYX;b0vR}x`n2zu0gwtfT>zdpW*)Gew~G}^?3m%zf;r*_(xuuY}<;NHgL0P?ft<~ z%SnuPd6TCED^-E2)>PHx@}Vw!agp|+a=c~O>@?F!w>NVlUZIW>Zr56>>20b*{q|ty z3DGWSY>>zfhD_0pr9UgKqIZ~ZuOhb1d3lq^S*tY$uSxXiKm z8Tbu3*X-;0Cj}nA0sZ zk+=|a_&mC0;QNH!k=mg#QQMxz=htL6y!w<4e&&0kM*CY^4MD>)O=uq=^+^eCon|?A zcEOtaLGH@2##{qy?3F&GHy+Pux1rH@=7|D=u<7?leF`+ZJU zR<^ypJ$HrxFlE?QiW4}5rz(zFX=#_ctow1LziTqRiB4!_HB|ID9|S^M;Cr^X6CpJL zRv!&EQ2Y{%Y9?WD((FeiakVlx-sIi?ZEdz6d$n1hU|;B zTwVSN`fdP;f*w73dh*&ZuvAwJSfaA*niqP zk)#m$Lx^h5ON@~ilFW*{|9iXDW*SBI>N~LS-9cEsODC z3<%QiU~0cpiKg`w&NMJSwn&|u!BTjmI@V zFHZCyj*P+Dbyu*EA14)67xcn`L2FBp!ep=+->bKj>iU4e8|S;b`sI5KrC;VUy*E>f zQ?k4f`o55+aDvpns5bdSpSqzYH^~RM&Cm)lb-D2ICs%NM|4$jPM()5%ryI9#zl;aX zQ5iUcmxdv58gLW9pBdV%PL$n;&XUluvA?aY?cF;I0AFpaL8JtDxkRFBW5b6JCvUl9 z8Je?mpyXjX(eK#tN+;@B_r^~-eeGm z8=(XrRAVgA`a8TKoOS!JLMW>a2*>>I-K$=mQ!-cuM3pOswKR*u(iwy{n$VPi)E6)5 z?$DMb1w|3g2f$!95d>r$38#)r0wHxX5);=64h$UEZL_CDe=2HWzo^R<^otdAfAy_L zp7IZnU%wV+%`2vlUijfD?$Jr&92+2^{p#rb>V99?9f=8o)}pe;5N~DXS$n*8AeZ|5 z2|w{3La7iWr1J|4A`=9ljY|WAnht!q8z6wm!Zb}9IaV>Su>5ivUpNlr&&4)oJuJZy zVSi5uy!5q(WmfU3y874QVeKw_aK4$Uj>TkXcjbu`S0ic^s$g{8Cz*|Fx5b=-NS^Mkwm6mJo+ovzp^K1ASYEp1oU`p2!Um3bLsN`~d1D z$psRmG&SxWQU}qoO~#GUEI=@pXEWZbzp-_{q@<)EN|=ui%0LE{ZdeG9Ogq5S4$Kp$ zq3O{J!9-O(eSNEvlA<7sZT@mDIiz{VHdh6yb(K$xYF_W9Jp9_OV9Kg^7HY#@%s`5< zY8YST^MKXo=V&PIvh=6V?>g&i-j$RO*8aJTz2gLvB~O**8XA)LcNzBC(Xk8(Qm$zg zY4SWNX(JA`;kZ7-n}bnW|H|M$mYi5qW-|8b)%i{|^kFL)bOrLifq}sYYkG~(VrjRD zE!bWF=V$L6XxE@)cw|C?7j*Movj^tU;Y3uyGfaY3XY$5UPmo`~`={|4c|`%*?xmxS 
zHFpa4#1!;+dAAqtv0Y45AL%m_p+?o02DI`)#LIeGeX)a{DKmdHS4m5a92!7pN@42X zlUGf^_iZM&?dksa0v}9{ST~oQlD7B0PJdko<=9_Y>MZ3sX@bR=2I|gXOha_%XQ!PC zCJ3!bt+nf-b_%T#!=7v;Pyxs1Lx&3RDC^7zWb*NoCsEJ9H63L0dk>v_eIH{x0$Req zBO4gxVvZ&-$hq3TdJ-GOapF4Dcuf6EPwc?T+>iH|9^T68$IL&p>jOObw+>H@R(sdi zXmijNCau0V?cxwGAKF;FFSnopms#4yCb^&W*C$8=(@I*aFddz^rSVGdUov9m=0TWB z&)ek95ms`~6`Xwe5JBW;Pf(L{EUo7a_4$vZ5;qf%)9frbl;?MiRM6(?RKdU6b88{z z5Y5|whe{c*7$kqyw7DKGymtR4znT~IuZ#s|Xkd|>kwDP}Z(L;xZ%V#IMYXF+;qpao zk8)5a0MDGFq>H{q1i{w!-|cC~9Vek|6EESK1G4$t+)1eIOsl3xnW z*I3l$?XX$DUw3fj<#4%`m+9MhmCY>zR%zK_cj5?B%&Ud2<>5RzomZHSW>QD#n>*9U zU7>*E6(O(~73mZwnbLPUVsC~rNM`D+Wefk=@{rgk8ch^)Yg@Oy{Drjh+T0ATk^>qN zZz__uQFYi+xV3IGhjqAH2OYbVYy@2r}rIs7{!((gfZO%Kv!obuD$zbCz% z2R6pvqNugl66cmQSKNz*zm>q`?Rtw%!+FwVz-ZMOUqWxj{;JtN7p%*Y>DBt`Nr`f5 z)4_z-=OyMLtcOD(8!oEc(9qYTr%@zwy|;1}_x_vhnkTk+(s@jT4RwNi)pr)y$e|9) zstjhLghbkD+>IO1P$2^{u;rWikhkIT|F5m<4@&Zk9S$Z4)y}TT}@B2JF-_Pg!{d_-9uSw*JxDw=Z>)G|Tz$XPLt%9WhUzjrn|g6zje!B#b0~9A~OgblzdU85hfo9J^(my$ubA zW%#k}<$TxuO21<5I5CRY^i9#crsA6Phg+@(q|H#35kt^2h%~ea7HUrp?R7GY}VGk|%i+~l7!{OLf zpzy(h!YvPk__hC#MG}!RHahlI-IsDMry~nC6JoP_qMa`WN2?ssa znpQytbt@$~IV?1^1Jk$itNtrMFqkLDW6rvlfw~0A#gdWXvK5ghi=_*s;_(ZicES4H zJ!fRNtKUYRYV#w-EZ$NXw{Tj_)qT{Xo@i%MQytr!u={D9$lKdnAP}5vf<6P#uz(Ss z#?}r>#dD;+v}55JEk>J-2k;c85CW&m!vL)}I-5_aEJVUEb}gy5CQl_^RG36ix;oRD_L|Z1{WtEb-sqMa=OS+r)X!c znPc^nTz+h~b8B_{(NYoHsZnhj-f?9!?|DmF?of-PWD$8nZ&A(nKE4mvaYFIAs+UwB9k-Pm*8*|DivCr-c#GTTyB&BV{8~f-52&YR`hYL z(g8@KwC=ZV_n1!RJPE;O%h^>skM0i{YR#B-rm_4Dr?pRgi{KT5r+Cs4oO6?!yQ=#7 zm?)w+9yjD%OHkao_PkjWF6~`5b_r+y+%NcdrmTX498wdP&Oqduz=p@b6=|As3j35o zRyFBnp6slU|<&8*23PV}zMwn2aQEIVi=w$%2~LRLPz64Y5gk)Oh-n1A(VbfqSF zOJ%6kKvtxb^{NZb+%TT#w(Tf>IiI-z?ga>n2f+&KawLy&7QdFMm$)v7cgiDJ_fA(i z2A?QqP}cMHzHw`N2JDgpfi-PY=#sS2qS9;Y7`*DSw~Lbgp18SbW!uot#-l>Hofn?) z*!9LH?OQq}9bR8#AQkF!#=P9)n}x;~)y#?F0cE0#Ww@P3!>7O!3(PQW;L~Kl0Pad@xOwPRAwG3 zBX`lg`u#nyGai#~?ugm^)hy&uRmO7b?$Fnux-qqX>e^ literal 0 HcmV?d00001 diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.plantuml new file mode 100644 index 0000000000..3b8690d8cf --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.plantuml @@ -0,0 +1,45 @@ +@startuml +title Figure 4: User creates a machine with kubeadm bootstrapper + +' -- GROUPS START --- + +box #lightgreen +participant "API Server" +end box + +box #pink +participant "Kubeadm Bootstrap Controller" +end box + +' -- GROUPS END --- + +note right of "Kubeadm Bootstrap Controller":Watches KubeadmBootstrapConfig\nand Machine events with mapFunc + +"API Server"-->>"Kubeadm Bootstrap Controller": Machine Updated + +"Kubeadm Bootstrap Controller"-> "Kubeadm Bootstrap Controller":Enqueues KubeadmBootstrapConfig Reconcile + +"Kubeadm Bootstrap Controller"-> "Kubeadm Bootstrap Controller":KubeadmBootstrapConfig Controller Reconcile +activate "Kubeadm Bootstrap Controller" + +note over "Kubeadm Bootstrap Controller": - ✅ KubeadmBootstrapConfigStatus.OwnerReferences \ncontains a Machine + +"Kubeadm Bootstrap Controller"->"API Server": Get Machine +"Kubeadm Bootstrap Controller"<<--"API Server": Response + +"Kubeadm Bootstrap Controller"->"API Server": Get Cluster +"Kubeadm Bootstrap Controller"<<--"API Server": Response + +note over "Kubeadm Bootstrap Controller": - ✅ Machine.Status.Phase is "Pending" \n- ✅ Machine.Spec.Bootstrap.Data is \n- ✅ KubeadmBootstrapConfig instance is valid + +note over "Kubeadm Bootstrap Controller": 
+note over "Kubeadm Bootstrap Controller": Uses Cluster and Machine information\nto generate a cloud-init based script\nto be used by an infrastructure provider\nin a startup/userdata field.
+
+"Kubeadm Bootstrap Controller"-> "Kubeadm Bootstrap Controller":Generate bootstrap data and set\nKubeadmBootstrapConfig.Status.BootstrapData
+
+"Kubeadm Bootstrap Controller"-> "Kubeadm Bootstrap Controller":Set KubeadmBootstrapConfig.Status.Ready = true
+
+"Kubeadm Bootstrap Controller"->"API Server": Update KubeadmBootstrapConfig
+"Kubeadm Bootstrap Controller"<<--"API Server": Response
+
+hide footbox
+@enduml
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure4.png
new file mode 100644
index 0000000000000000000000000000000000000000..f6a1a48046c397845e541abee2b329afc8a4d196
GIT binary patch
literal 45500
[base85-encoded PNG data omitted]
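The Figure 4 diagram above describes the kubeadm bootstrap flow: the controller is triggered by a Machine update, fetches the owning Machine and its Cluster, verifies that the Machine is still pending and has no bootstrap data, then generates a cloud-init script and sets KubeadmBootstrapConfig.Status.Ready. The Go sketch below only illustrates that control flow; every type, field name, and helper here is a simplified, hypothetical stand-in, not the actual cluster-api or controller-runtime API used by this patch.

// Illustrative sketch only: the real kubeadm bootstrap controller lives in
// sigs.k8s.io/cluster-api and uses controller-runtime; the types below are
// simplified stand-ins for the objects named in the Figure 4 diagram.
package main

import (
	"errors"
	"fmt"
)

// Machine is a trimmed-down stand-in for the cluster-api Machine object.
type Machine struct {
	Name          string
	Phase         string // e.g. "Pending"
	BootstrapData string // empty until a bootstrap provider fills it in
}

// Cluster is a trimmed-down stand-in for the cluster-api Cluster object.
type Cluster struct {
	Name     string
	Endpoint string
}

// KubeadmBootstrapConfig is a trimmed-down stand-in for the bootstrap config.
type KubeadmBootstrapConfig struct {
	OwnerMachine  string
	BootstrapData string
	Ready         bool
}

// reconcile mirrors the Figure 4 steps: fetch the owning Machine and the
// Cluster, check the preconditions from the diagram, generate a cloud-init
// style script, and mark the config ready.
func reconcile(cfg *KubeadmBootstrapConfig, machines map[string]*Machine, cluster *Cluster) error {
	machine, ok := machines[cfg.OwnerMachine]
	if !ok {
		return errors.New("owner Machine not found")
	}
	// Preconditions from the diagram: Machine still pending, no bootstrap
	// data yet, config not already marked ready.
	if machine.Phase != "Pending" || machine.BootstrapData != "" || cfg.Ready {
		return nil // nothing to do on this reconcile
	}
	// Use Cluster and Machine information to generate the userdata that an
	// infrastructure provider (such as the OpenStack machine controller in
	// this patch) would pass to the instance at boot.
	cfg.BootstrapData = fmt.Sprintf(
		"#cloud-config\n# kubeadm join for machine %q against %s\n",
		machine.Name, cluster.Endpoint)
	cfg.Ready = true
	return nil
}

func main() {
	machines := map[string]*Machine{"machine-0": {Name: "machine-0", Phase: "Pending"}}
	cluster := &Cluster{Name: "test", Endpoint: "https://192.0.2.10:6443"}
	cfg := &KubeadmBootstrapConfig{OwnerMachine: "machine-0"}
	if err := reconcile(cfg, machines, cluster); err != nil {
		panic(err)
	}
	fmt.Printf("ready=%v\n%s", cfg.Ready, cfg.BootstrapData)
}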
zh?ni(Fmm1^+TX%~`D{h1;xY7oP>@Q~kPpt@T$DU*bRBw}{5 z%AR+*JN7xQ5{TLv5l357hD9ZGD!J*hTtiht@87>49S~*bjGm!zFK&xFjn?yeu>A6( zT+!Nsqr{?MXeMVE64jxUhCgr*5trmeOG~@)gXW@8{j0%JXC$tEqMe0B?k^@~96x+W zX5)NoAT2$&pS_*kV8dj3lDCDY(^rDLJ)Dg4w&FOPj4{m|zkbQrDieCkAJkU#J1XVg zkVq0VfYnET3qE$Au^M;vlcM8CeD_Kg_e0C-uCs#?Jx@>1z?PkNsCTkCpWc5szUp+@ zpCL6^-sgBg2kkzUw=?TW;ye4*M{$acj-CPuFhzgw<5OBv^2<76%hq{gR4|X`9ru#* z3PuX{kM8ctsi_?7Rg}z{cOm?z66;aB`}=7!!v$L68CC zv7!7Lt7|bwA<*0pCLzEK#=Pv%!3sk#iS!yV{dEw z!tHlEB2VDs)C_ibhGZ8S41FZ`USF7I-uNXebuITDsd~>v zdc_1E6f9IkCn-a0eEdj}o_9wWWpgC~i2aG&j*2w6!^6Xjnq{Nk-lTDRquMQg4=pZ+ z0w@4{v>MCpaHGom>QW6j_1T#gX0=S_e4npm18ydy-U;{{aIw$V^R6;xlQ}bib=*6BifH;59A@D=L>~ zvWz0|!DpD+WPwsTeIYSJF(E$kE9rxmUo_ZI`Ar(%)~_o5TkkdN5?b zQBsZGaZ8e-v7DzyoiJCesK@QbA@7%TD1(yb;j6HdnbZ=`i-Qr1!ards(T8$|e-9as zZ^ECxNqq6+hs0r}~L`yqpS?9+op%4>83Y)6^j z-Q6t~OhiabtfirG`_?TZW8)MV0Fd6o^dU%~^cHsa{|+edr^x^PJwV{#bF#aC4u8%j zBg8WO#qQgo-pa*jn-BY@`SjO;{tHTh+f){K*odp6%?A)^acl@2d36Z930%A%ACcpV zj#(+2TO8%~<{q@Wy0l$yt06WF%_maBz4n4fA42B`p~6*Jtdzv8l(_}ha2A_-Jk8qN zSuzzDjfN$>gpV){e`K@Ol`Ze&WK9mu517Mnp=xwkvbYUT*$aYk_azHlRSAs+*AX@r z)4}OyV{Z&Z=HY>7n6q<%wYMA*pZyp`pOJ|<9T+6cGVQKr!-R-s;n2sct&dO`ln@m0 zR`4>K>I@Ib=uJ&rGD5L#BDlR_a>=94%5B+wTH+xS&p$~#KGJ#&4U#2QYe%;S89tL(F-@zPCi%VR~3ki8y7@u;{ zvqVZr^RsZZ0H}$ze(rm*`fayQDoyY63&X)tn>N`qW1@fe7^283kd}Pbx`!`b*MjrLsf{0?7Sb*5s6~Z^G(cUZ`Dcred%M|A1@v(SFz%AXq3CZ~!O- zpxj0%RQnK`pOtka88VyKL#nEHC;M0&0|KXGgNKKS#mB`xqYhE1Zj3RD%*vXGpuHgB zJyuu_bN~HIUwg8c+c8&5J!WiN({c6Tx7Q;E`s!-XdHPb>F*d28>1yZ#Xdt z&C8>>SS#aXn2h@7c=~(!@`B={di5t*b-iX;HYwrQ$zn(M`}e{pCufy5$usqJ0^oMl z0dvXTvPVZYjrtxN>hQrx*ONW9PoJDvqZ6vCXb*evm zRJ8tEU|<@6jfsiu&fhv{^5xYZ^$krvhA=(AD$*jjT8?M$Nf=`o5Xa=15e=jo$zQdw zv=s3(IoVrXd;Gi9w6xj@JJaX2l@1qg?LmQ-`w3N7luTAj1o6@+tsvmww?kU#Rzal)v-W7_JCPNhl*32-A)<$j3iBSln<7O!?m<0q2oDNz3FSBCq+8ME9>hr;w?>r zeKpR06O~6fO8O_fDq1x$YV=Snl^0kzq|@PUx)|N@oYsrqbu|rRs=uS&QT@0zf3_Vx zlDG1z;b_}bsTYrE++(>rg*R`wAYQxnn@_V%!v((MBab}x*CtL|kH1_lH`?y9BqZ>{ z&s%w%>myfUi}xL!ou?~pR+GI+Mr)k?bjE1K}&0x6YsLONL?RrT95D>!SI)@Jm<~R^U~;TCgylnX1+i@X|d48 zwPteD~_K16FYvGv{qDxhZbUM;eiKkG4L3>1uXJrDJ>; zxio|Qyzdi}&S^k_RbMLq5_39%JJquW$sAe+MtX(10EZUi9!1yza#*0^l014P6wZ9! 
zmu7yY`-IbJVoW1XN#Bv^!<_){aukGoYco_wA)M-9DbT?=A(CA!hu}F2OQ8b7xcP%b zbzP)4j>FkueI&i+1u{xXT(}<8K`H2P&3$1$)tU ztL9X3LA9l^NV&_}Iyyd0oo0u?=Hl-*`v}2w76p5|CS*UQ)zvZbG`a9vS%%t+J?4`6FJ2{b)=1t~%@TC^a(wNt3|P*<+ZI`NMr2tHnFa|zHR-bKeG(ph z)nO!hkMqO`H-^jp*KE_aSv7dpZomG;eB}a*29qDsv0|Bp+E1{$9+x*Zw%sbGSF5M$ zJ)1-L@N%wA8bcYjt#j?IwQDK@&wk{Z%~3{cWW-UIk8X@nR6Am(6oiF|hMR3||eXS_;*q%OW7B|90v<2ZlLNsfccweDm638l2 z)352(*7DN4Y?zEy^|sgl76YN;u{SBr%S*F`d`e6_2qvyMnU@pD=-`d89?FGbdb5!h%^-5f8Eu})UY|5)YeKBMP-@iOoUcUeSdt}3H59pqvU z63lT29_SA%*&g8@9Bg)6Al;yc@2b}e%7B{`iCJJ=| z6L|;}H))pM-+n~QSx6Blo#Uc76m2e@5dyzB_c%SoP9y3o7rVzsxj@R$fvby#*t@Oz z-}7>?KB9Qx7i?3n(ZWwQjEkso_Qoffj^ow0w^x#rtMq?#w&UDkG+0$;J@w)IhNjd_ zNrp%j%ME!$LyA*8vD^mWLmpVA&ifA3Ep6Wvy-zRCo{X7tIaufG5L0h>bR6U;Abclj z$&Xf&<&;*kpYX*E7pHJ8i9I>3bzS&Sqg%}uC;#lUDGVR7Y><2dy`A&(ZdjgI@w0Ne z=g&iP&CSdj5096ka~zJ$LGsK9kP#8Ne`PI68>_gRp36KFBjmm_+ipI9=H~7$PZbpv zIbOCqQDLPU>Kolu?D1}>L+RTqmLgSsNUr)eeAhIp5t^TW!Sd`64gjkAKd6QRe+OXO|9$uuIbdNS0FI-Jhw$@ohRXiNFjh9!SI0x( z?MS~}{fYE{fY9)U-@+)d5c>vfhs1Sc$A{pCU zGO@@EI;GZ}(*dtoRTv9H*+wgUC6#Af)aP1OT3>E{NPfP$B1U4AgIzyuT%{LCZjqc= za>v#vt{<)u+2a-r17=79M*yAdQ1Tl$G^n(br7Mvuv4(%n;8N)RO`pgIX^+BWrMS6P zaqFsA=>agcfg}Oc_{P1hj_gfacD9NFUQ2d~vT^PU6@jhF7y2;_05{HMyv{DAs5BH7 z!oX4$3zC)KW?WrNSNeSzr32R7G*r5tEahWD^VgXmO4i6YnXQ2WHs>rOWsO``m4sY@ z^?nBZn4oj4wdL<~-*s!l-1uUKD_9MnL@Bp||8uG7Lz{Q7>{?h1naCDdpV>I`z+I^f zll>s#sO7DQAUY|P%mAf>M@7^q2&2KK&^$pt9e@}Imzv5rxGVeaQkppk^_#z>pZ0jMo71dYb?F0~&AzPzJD<^|l|~hQFJ$Gp7~^5Tv+) zxUW3SqCJfD2iyO37*xPuLI95mgZ}&aHS&kF-DlK?z@KUYzl)^6TJWLNhmbNLYXWfY zI~ee;e{Kj|^1nS+#O-pCmMDeS5F%U#SQl22Kuuz_0fn0CiyJRAtyzL_qTbF0EL8Kk zvXY<;aHT0Z`-GB8Ah9me&wc7hu#&H8asKCxfo&TpiV@SU;PMfmLlx8$_Cd98~te`*S|vc=A;SAuZ{wz z|9(dbnD4PZuT@pRCcMydxRikft!IqP~3ylw>Fq#;^szxU+-%F;oZB-gLTB%d>nh?2W6($5PsWP z&-t;np+tvf;^kFL%yyk>HiyCN%teAV40vw>;Y%5rN*)3xEUJ%}JBYIf4|FrmaZ^5C zjk20m2=HM(YeQNEn(t1^U^NMu{%VMge>op$Jt$>8JsHVZVXg$&gu56C{8P!*I(r}T z3uF+@7A&&s=q>#MZfs6_i}=>osvI1dlfoJ0nD`&6YqVlx3E5w(9z^S%+7h+4)GlpV zTfKgLIOoUjye;tf&d!y2R z1#P;T-QakqjTgIWaHSWAyhkQ}xI{ECnrSwHOHfd7tKov<39lp4%n`844C3< zJ*swk){-O|IH%=Z>QPeu^U$Dp%4hPn#dlQvf>8SU7?&`~RK{|YE;RUEU4>Clr#_rG z|C0BykiiQIwlX$1@6S>Ck&qA_SfE*z>xNJ(#3JSy4k=1?C+LhVG3Jtsoo6O3Qda82 zSF#H3N#yImj4n!3)_U+HM=bR`oHqT}8v^_wKWZ6^zCs;GIugREb92AB^4BBpK`jg{ zvc5Ruj72<5LrWWj!l+g4aph?cVf zVg6b&c3xBV#s}>V*doW*;=DW+Nl6LoE&bT6I59Ca5rdPgL?LMsf$3;p?~8JqgEfx5 z+n;k8tiJE3SYYfoc(~FOeNpy2xTN1Z&xTs6lv`YN#74{0>E>jnGG>kwt@H+R=Jemh zv!b`y0gI8bx=Q+K#<*bl=LmFf%a>K)`SIvg(=#$QN53xY%pqJ|T>*oL>Ttcvhq1A) zek&RIpnbw23ChtLNy4wI+x%V**zq^N&7Miw?#%iXXw^L8f(T(@ABdj0J%3Iy)T0^j zX^<943IGwWA zh<=()o!n1~RzszxiLuX7*$&`C?Y90dh}R>9lzuUMaa4ZCfL4FYgp9-QV2LNzFIZSU zPUfiO=>~D@R^%Hlby9&PvR^(}Wojb!xj~3YLF5C$Hs$6fqf3ztKJnT76f{G@jgw*g ziPB|L6G<7D>so*>)Cal;&E7@)};c zMJ5cs3fKJJPS0Lr7D$_+Ko_a}Bx8K7JN9N7dg3$XhvRfSXQ=#&nz^5eU@@$7+whzl zew}UmAc#h4P1_5SfoSz^SgS2D{_1GM5Z)-yvJW-7C5W&C{AegQ$CqDUv zySjWh&;D$ia-|!++0&~%KetcLRiW>V!~3qyfVjqzncSi9Wz;KWpVLF^>tua&@0og zHZqP)_n5w`K|fzy-xJ^n+gOCH zFZq3~F z3nyl`>+D?XG-Q2FK?54VrVuzM)*l3i)jWdh8iR!ViaCJej^zB>pzRWA* z?4DXLQP~9vPR?6W81e{B&A7#tkkp;?lN1sD*oLgA@p45XM-#(A2NV$*76wh(K8kzS z@CvFecvUZK{-0ZKdJr&LNKNtG204#WcTzTY0e?$5ZS0}1POaDgErtvmHrL0+0Sb1C_DYly2eY97~kHRO!FJ}PQ`4xR3$Rd~D4i=4^i&|Zl=YHz&_Atcbm6cvTc ztU~|YAnh!YbGkS^QSk*AS4(T_Ubq~}(msPA)@!8g>WxeiaHn#9J-G7+s}{b5M8zJ; zmd!-O?a5JKNo^u$DUcy|&=M|5k&)5waj3zAit1?>7%0u4a>J2|5_eoqK$M8EF2Q(j zZ777+BFXJ*ANx~~VD*8FFt-O4!9=46t%K`|12RlG@!Y!u z{vkrezk{BDK@$g0oRHAWhn4)gW&sK0>;Ke|pnp5Q92~5R9!Z#ykzqAe%?>(B|B1cd zHpag$nJ?ZqrObTj-$4yGw|dS~-+)0!Opa!1Y#d7Ou`&8JPpt^$PxOk`>C%XHbXg@L 
zXE|vheyFIgspXK!LHwoU>7I{&lOGOgPe(_U=NZSBKxB~w)n101y{Q3}C-U^MK(&WK zFioxe)K!vfOBx=Y_;Y;&XjSty$k=Uq2nHTqg%kA+^ejMTtGV0ex?cG(1g1H`y8!2s zACuSBCAk>`zG5pVc=HP;(nz*rteZ36eUjjbODbfew!XxKFp@&S=Pwzs! z&?Tsuvle+ec;U^T${!E?el0aXj4zs)sm{n+=alfr4*(s($g;7vJ}yuo5VBg6$=rJ5 zI!Ys-=^GpLye}Xmmp!k|keA3^q%dglWuk|X-G%G{qa|Z^e)n3P4l}6B@db{CCo2pm z#;Zr;wBnupH=FIp*+*_3KyA z7_O+O2n`KA+MXG~Mc~E&p4i97M<@lt?O+CjJq^ri{QOjyroXR`<>}LRSpv>m+9QQJ zwA16`+`POAKZr7^KYKK#mt5y*q@l6lW)B3yt|TgoU;^{&56WIxu0uimxMMoRnRjoB z)uE@HtCXq7E$>RpYy0f$@uzT}VM`7B&OX}Rs@B&vrTBD{9L=L-REY5l73(mZoqVf)yx{TieN;#Vi6_i6hy~G`DBp(WcKx)eOzZ+W=b!INsM`zJ>2;7Z7Xp+QEwH$_~lgb=lbRw z4aHn_AKRPnG^3%RDgkZz;Z~9gUXFz*hlu{H<*B@Epr7kW=NJvg4<| z+Up<}^Xopu3%h3*(ancUGJ@6b=7tz~6kJDNGM;*F!Z1E3Ce%j|1$cEiAGdCcvus3T z(+YiyncF@;m3OK0Ds5(NXh(8c?;UO+jDO5xBLbGN-9K~kuFNghnLYYPdU-8@XH0A$ zuICd##YXwZZDnj;;#*IIaDeXpHSaa2cwr6MVg?Vv9RiE>MrJJ7c_98Z0p?MB3ky-^ zdkdtxzhM_yP5xnMuk+&{{`NZO{SDaK_1H+R>TdtdKj{wbI(VZ2D)v9o9i+>@Y_Gci zL||2=9uReHL`)lN13m%>2ba^7fO+jiHTve zo^)Ag3m#uGG&WX|m;a(u5WBIl0km~7;o+z@e@L+>2wSdwYj5pulYOdRttIniT@bt% zK!I1NTgUD8+iVZh@iwA`x7txcTDsO@^`YSJ*(M+0&5XtlyV#&*6_Nv*#5U9aKrXL- z%shdpjIFZ+$8(n5yajuM)-J*X3}<5;^!3c*67oSxETSGxwN5`~{t0)Qifu9+YEvz# zyzhsJTizFmPOHwnI=|LeMSWAq8K3ZI*vEz&!{b~S7>Ic?U*@tk$u&#zsefVt>HTi0l+O0D?op>#hMBe5l!0y{oY{ zsGub!=KH%#$Mw=VI5!C_6|fD$dWVNy~UUt`U{Uy={t~eg5I8mUrY?d+uM8S z{q9Z#trEDH_koQs9NuYo4Dvukz4<`&l!>CEqOGm1wY9aEmlufWX{o8|yi=UF(9rtd zq#TD*h+C*>_DL(VFsPvEUo+64MRqy`yhDNoTuZacBg%C&XCFGSTU1a3=Rhr*cle=ovWy9p|;#X81vhoTUJ-Y zt|WE&QgHm`u>p~flL7v!>?RK6m;mB2}JB?}L`c-bW04po&R-3uV)Wsv3?$yN~N z*Y#H{EG*2)$%$sxi*nVK3?yLI#jSg`Jyj#F%PQA)B+_WM`puVq%>2#Y&I zLW)%9UvDl^`NEj0_sNV(Tx@K%!NM-t0V(5qn(S~o?`@I|kUhm5UW)d#D4~MdqD9r0-X5Yj8w-nllRZBmo zZZ;yy@VxmMEVYInXX7_q9LC#zV}Gi2@*)>k`g$mu1jwNv)64(~V(~mt_n_zzZOe16 z9}LpgY#~Q>j=f!@xp57dtFe4D&c|IE9kLiaIVd*%QsjzW52Doc&h!dlnY4Ovhbi-QCi zAEvpUgB4!-AaWfSA7!>SL5bJbS615bJDx8yGjzO4WHk=(0C1Aek1_|yoac-yzgk)5%? zj4IbLln(Ve-9pQ!u>*xZySKE8>#1hXQEo63=TGi-yXctBi3sTPgPad*Q_}eCa zAO=TUvfO0$RLiB3NuG#%(PbPR{2q_5mAI@2eK&KJyisZ_BBoLbe#N*o)^Adr$V5`n zE17};hn`Fj#y(f>Afu-ff@-o0}bkc2Nk`v?O_eglvkKZw~ zP=Q)ykBx=JygwcNKSUf6RR6|igcF}(8y1~jk0cQA7mGFX@?mWw3Oj+aj{^5w8cY$ib z_?6%i@%8#He0yD4+G@1zd=I54(rfVAoo}6;omEH^d-vs<0k~#vwB00My?O&)wzj{( zo&=veI4|Y@7VHoz?hk*O*4HI$0{9C|vT);dYY(=B_2Iw2`tRS`RD#Egd8h=`%pT`UUf@~&iX0y zUL&Yzn$VfKEU|DGYY%Q?+>5ppS^)i8XwV;Sfk?P4X`4$VY4^$Q9hfEL#h z4f*nup1881S|=})c6rDR=(#_Byz46jGcf4?_GSSj8jI+V$Ve4I!KQUaAE#N`2iCg- zjXgbu04;EFaqS>01_)$W1?%dYE!VE)RD^os4&3$B)E!6X$0SR&`*svRw!>|N2qxG? 
z)6&u+70vWO1c!uo6tu1sw|?un6G~zr7|7w=5_SylX!$mGT9_U4fT+oJ&+zQto~>O)c^QzvuY$<>_1OM3hocpqh<^dS z$~|xT9iSm#NaA?>a5#A0{{r0yj=8V*!oq`nC6}tL&Bjgn%ImTp?@^{}^*4|&K_KEP zbxSkb-LjrBwYKPN2F-p*Ujb>#$)25asd9-XCBk zd%WR}?M>tn$D*k0zveQ|LBUol4mJ(}`UACq=r8-RJ(M>B6!W4*nB?;xA5bJDBzz3T zolRmgE;s8VcXoCLTNDHY8o>UdyPkr>2lh99s`{W|mz0-hNN3+j)prp!)@K2@Y$=!` zJYI43Ka7j=-uLg{&yKbOO=wzc0T3OSoYdyeqn1m$UjT&HmX?;k@I?YoC5=PS^><$hi=lB^hQ-%YdQT)xTaAm?osk3sipUc=U#IX^hoCEZg2Sy7xzse)nU7tUEnJN?k$SfS){(Ace2srAZ(%~8=@cB1t$ z-8#3(#O{N&p$QO^!9E>;_joKvi@h#RcS*ROJzG3#5Pi2gkX7okw#BUdjm%9){z{g% z=)3@_x)&GU&u?yS-rL*D<_z)o-#t1~Te^=U7t5OAd3LCFrD|rDO#|0cARILSyM0_1 zzAt9tN=Qm#+`HHF;|Jgy048%*R*o=(r$XxOSMCi}n-65}Y4jzE>+8>+?61&z0Xk%S zNR92$!JowiI`%;@3SnXLujhv9PpRYzCU^Q*bgSycR0!}7o%UbUP=)*27;QdNQ*_ij zs_Up0(h!Ta=KO|6EY8*~Lvnv_}$~_6Y?fXi>M+2r-98DuW&R$WE*( z?IW4Gx%r59jDSkfzL<1*`y$H&!{0bt6HW6BkUZ#jf~AZp8G?on@P3F~to;F%Sb;8e zTJvEaBlKD*_gqqOU6UE4FS9>;XQ|cwrtPR&*=OtqRF-kJbMf{k`y zmt0(OGV?|#lE8U65W?N@y6G{4kC{T zP&09b`4_l=jZ29P*GQ*=z8lGntFeY0`yF;0)u3z*^7MwbW6*| zoz!gfT#Aeicaq5rVp6|z%k30U-PKOT$ifgq=juiS-@-;LxKJ5|Z?8#5K z54UrMG)*3tkGR=87?W=#9%#2z>grWtK;l_I5|G%*>Scz$Y=7z^*M}O{4{UDE@pWnpjD3aPS z@BiRk3U(N!g$P`Z?%F846*vbVyKLy~O;oe2vUS4eCh|=pMNMIJ!itIyjmZy!s=rTu z5Dv^a0EixJsqysisIVMMrG-0hO^y}Agk$qb1iX07`=5bbs}_%|Z}%1c*P4YDE1nV* zli&HtOw+g5N-SaoPLc;|8}JSiuy0LTO$J4~{=$H!e4NaS<#Pw5*; za&oEu?Tz<7zeYt<<%($|@jny>%Mj$Z^OnQ;IH8z%t_S?oPcT8tPxN6bd~rI;)xf8FJCP?}pm&4^1MUC8q195)X24V0*~5 zt`0-!wx+v);Yv|mrisQKE62M(%3K2b-h`iM9jV#~uV0%86%H%HM=*@-^Z6BatDjnh z<8W$}k(2mb?`r|ZP&e0qVcqh}INY#auQ(`UrDqpSxIr3d-~e(2JtOxFND>FR#q_9u zO&9@s<$TXBLBRb#pCac4mJtB6{{MjGAE^h|wGj2Xn~m)Ifob?p^Y5Q80B{{?`d0>k zRIMU;Ze%C%pQVuZ|IOe;mZ{Lw;c8Q2RtQE5Yfi+cm;=00Kq*0xx&8A`odzqka@>0hxJ6q2vZa{j*M zweDvZqy`ZwAzSB#AAl{xG!u3QeP0!iw#xVS;I-8{(JlpY zNgD(tx>6}nux-F{@H;I9g>;Qae!lef3~8Flyuc-d%Q}Iy${A?nDk@BX7!NFD;WUgW zeV+3@sbr_kT(v?=A+ej>N0KX4VI4g^wZeWxraf3RP0NgD5Hx>)ypPLafXCEdbDNwh zFyTcinsK#?9NpqI#;z76v`o1Do5uL_Vm| zmT8%pmDhW&4@{HtLhOMi#=&$rH*)=T)6Q5a>c%GHayNRloiRkiz~DZkqEQZCSJfNC zV<6HwKcA*)$1zVo#l9A1>M_F6rD~jyo;CQjx3u4pw*iH-?YxLU&Zp(|qMGoc^C7jV z3~sV|iFb1R=nl5UWOCL!v+%)`{foAiTG4cB&UFNi=hk$v!ETn^5p|oq!Ryxp#l@N< zML4FrWpiHy>v82?#zFBfK+q-~JAXCpZln0c%`twV{jB7>aTmAUNR=42R*utWu8W9I5yg!s zCuzrA6xCIVo1t$600j#u=HY8 z$FREsLMtGA`Eu#7_??Pj`u_xxH=ut6k^AqzdhG4NZ=qGA?{EK1X{)kF%~|*VAGKoc zG_j-lqHOS!^>#IdEHNZ%H(i@qS^z(4u%YdGaSA0TtjY}yJxJF~vp1F7YhGFhT22vh zK<&k@rrg^Lb3n&c_F645Kzuk;hMhz%bh~z;&il$;S=qHx zE>mnR;aRE`4V%SH?v`eVrV+EuujQaE1kFpvm*UC#ujWqng;)8-GM}W&3S>~UO1bPW z-?hk~l1T`hOo*2Mq4WLOBSx;N&$n-VQ#hc#*qIAndQMvcW8aGeZN9j~MC^x1iIGY( zgoAC6nzhgP{CM}cFD<_BNp()n+;m-?5lIapA>n=hr8DEFR8^&W>qVX&&M+7=bB$8g zY3a^(15R3vzgipY>3(!+^#fscOR$+Kl6y1 zr>4f)t*9@r<|zgba=j9nkY8m`vA8M3&yIPU!)EJrtEPNR>9M;z=JI~ul(4F*TCCG& zZlt5_Bs@)jfD4Rblef%d6y_h5Q0uWpND%uf6dNcPLBAanSOU7qlKJlQA?M& zta)qjTG4c8eP2n)ZJiArDfHxhV_-iXXx3BvL2*~I=7q;G#h&KIMmLagPgQI_$uan; zb5AcK7mU5*{}WWwd)NzB-X`^suLLnJ^`vW1A~B6(94=&UTl{$ses$EK;IO9n#aibK zh+ElhF6R#zTjguJNiweepkJvQgsTr}UyM0cCUUDV9ZM9s)pL)G6k~D6iiuz@pvMY+ z@q-8aAce=Vt*$QUoFBbp06C56rp-5)F!kK$Jm&genLT8f)KMo=cZ_8YuI@qFj^b+4(k~b@HFI_f;mLAl;PaSX*J_>`&di- z{}_AgxG1-_ZP-Ry0bywAZs}4GkVaa1D3LA+DFX&+kZz^BOHvrRyA%-V4(WW?47&II z-0$~2zxSWL{S7nMx~^-jb;fZX=dm_NaPD^@q!>r@#9Ii(2qmbTZ^ETo0{>Z5l8EuRWt^1{*WKb=nzl&4U_h0WFb_ zZ~Q~n@1Kn^fInZfDgBtY4YBY~go4PNiL60Wd zdN2)t7nIBKn;I>qYQ|_CpR;6Trt;v){~9$ho7PvheRfFPRa zvjt1%3^UivCzlBq?uowONB^|=38t5hMAW}&IKKz=;e=0^O4R{ z)7zXHf!|tNjafDC2VD2YihC^wt3?^5gSLOe7(KSbO-E_RBbmSqno^y z1cl|Q|C8{^@m6&HY^`uW+?U7R(Vb&F=0?-Y+LRrK#ak2OF}n$41sig=GyHkYepXp% zC?eqKPl!zu_8glSUp)W;JX`)4kM-hZM!qv`P@8*OzOlDr^Sn{>jCW@5bE2})NG}`? 
zIyqtUTmI@wIg&CRX$m+4JgUMIlNI@T*`GKFr`$@h37yZyNJ^Gb{raW(O_ih8^Zd=M!wTpOL3{<-huKF)i?W#f#fzhE|@z_Az zDew-e`!r59_T5VeA^?S&+=iR@ORL6AmgnBR4E@+NJ?%940H@05PwxyjlU%B#!g}F{ zSQAr8B@84X4n**a?92k9T6P&7Zj^%18Qh7EN&f^ddB&GCwi%huvs`_l1FWDY#Gg6I zOy${w^7iUyJsc>1xB-_IDZf)6yo{@*GnccWi&PBzT`0Yf=M5Dz=!vVC@feH8ci!5R zhWgLCMRz_eWAM)_$xvBrL{JPstUcO5`)Ayg);%ZXW3xw>aqKTg+S-9x-WblN{fFln z3P_OgDea6F0w!b#8W{p_Kk1w2$@zEe%>8C3W&YenNmlOTQHxQ>6#WCG#4U+Le%z5` zF7xAC$`0LE!`_5n#x0+j#VLemF77#*_tre*x{OopP5BBcIDl~p!Z`XCSrf`hJS`(l z&tQJ=UJIiltpgH~--gj?B zu=?`bi|2Fw>goMI1*li0vqZn>`J6<7r00Vscy1lnsZk-he-=@g}FS2axhh31jkLC9UXH_vv<4gSl@PQ8zB^v^YYK7(CNY)sTNRjA{ zBB+mRC4YyP+FKzIwr$U0C^pMgHnSzg*{2>(Rw@+pkOh=TaQmQAB{2}t>FIV$U76Nv zw}0{#Rmx&eVil9?{mC3WA`+0+*`Feamj9V5N`Cm0cK(m7@(~{>H6bSV0~%sBp9s8% z>YgEr*+?wlD&fRPZA4-L652Y7*pNF95GgAdqQqyG)8{2}!YQ&+jH>v=%S;mz2TE*Y zed3$?$NQUFQ=Du!uCJlA+lb*syjYlDh+wTMN*49ft8ud0_=ai&QN4ypWdQ-mLN_Kx zg#%4PV(m*l9a#e3(qux_Z@crMQ#TY>0u*>beSy)w`3?+l!pwhEfh_XRb4BJ)xb=uh z`aH{*zo7aGPs5t05092(!1Yu#_`HDAvMt3VvOAdDP2AbnX9LJQBw0YNY7Lksu`fWh z3kw^2>35j0>#i|yzku1m!^7j}=SKn$z-O^{b_N{gRCfgB#QSCTEx-y42nyQY-YzOH z2S{(N*QE%ULS&ZC>Hw**FNC_@>3b_@4imRFaj)C+ zXcomBfq}ced4q$;&$l*`TL}<0K%@l=!g;(1dVaERw8I6GObiTA!*}=eyZ|*ueiJwn zOaGX9MVick;LC?bMBtH-B*e$de%;@k5@cjVL)cw}J&WeYAchcjf8~+7`}|A%+VTqD z$~HBAurNWctyi@=ARyDx(b23zV9n7FL?Y@v&jrwd z8rU~b$9MowK?Q)KL*)97rlvh$Mj#9g0>aQ)%zM?B zK|v*+#=fpJL~#;rHj)n-1s6Evf_`_@xtofe^Z{M7-u&_iObc20; zVIeX)T071_37(EG7pG-!`CmkR_4($(lMylzp1_M|6uu zfpRS60tfSLRXV!NygX+6Fi_O!2cRk$Il%KEkY`~sTd;DttqkbUde#o=9_Z-kA>^Wn zY5RNgXOouGk|fKom>F zKW4XCSwK!+ZDMW3%B`$L1_xG!PGe+ze7vWJg_}FFP1N(uWiU+|@Js>G3DlifkUU1o zST{E}pm=g}bYwYEa{KReG~CfHqI+}fTQ|6_&GLhT@R9)&SXiVzmj%q|uMM(`SlXwi z^pRXsRTC*z;q2uIVgPdWdM=PN*ExDIl|OU9c*biMWj$FNLHM_wmXQSZ^)%jkBiVXE zk%R5*=UAfwf_MtBIn`PA1nU8z5AOOOOfkN($7~XT3yOzsLR~J{?7BqPBdrH0;YZ{+ z@jQyTgXR}sATIE2*evGNh5?o3-_|s27w*H8)mB`A=XzD`w4Q}N0D`-Mut2lN$i?g6 z{rHTDdbLgf{(!{|4ATQ5_@!O&TBU3$n_4n*)ga5!;IrsvZ2WTq09k<-GVl|Daxnp% z@9IZ);aZB0cfmb>V<#d>6a;^pZ$NaDf-55FhF9 zhp4>%2dnYwzQv3*x5(9$RdaB)UhalxIxonwZd%w`l76`LLCm3q(U_nxp*TYQuiCNNSl^2tqUt0fZ(R~Qmh zt0=UDS`3uvccl?aGmGrJG5q%V8WReWteY%aKc!;Be-$;0B)`|WRTdVqNet$s%SM;9 zzwAqDLGtCsCnqH(C8ry_$p0GlPGZ;`TeEL4>T8STR(!ZAYy>Tfdw+cR=*Lm(77rE) zPW!KXl6*=H2^^(N(&WXl?;$V6{EQS@LZ5%0I&xx8`eC7!;At-qyQH0D)mFZ+^2dZ_ z|KWWPBqSz+Vk^*E1J#ycz+A(R{cF+Ulapsa$uTR7-r$dGU=)QcQZ`Fo;`IC?6(*%4 zO=@l`UNY=BG=awRT2h)y$cb|3GK8awDj3Y7PT=OKg4%@>nG;a;!=7^$N(aEYfJGJx zHd#!3d?{$Mv@kbc1wwE>EGruu9tZ0$@ayg7?(lbhW6!#VT-61&vw-8T;u)trRrn@( z#6z2x%uCLlpUu(J+aczz@DzyYn~Sq!px?GKKi`aBS6d6}s*Z8e6$afiKB|(bzFqE&F+I8N|7aG zaHMu+0B9^gSt3-sHrxJljsxQR_tM-TtQH0QfbG`A?-}zI1@VJt8XR)1?uH0zwR%$c zkp7u2&3V+;cEC)C;W*oC^Sc)JFRY9QO}ieWT~1msA_uR9BCMP281cbF8<$>qkJ~Kf z-}1#aQ>AwH%GuQR#xbD_XrmJk1?}=`*a+tIti~*0SHqvZ0u$MJ8`c#E(EivhSmY0V zgjcz$j_j}@1PHFI6(gdC`j-`ijK`1~1y>Oi0 zr2FN_G{h_L9~IYD6ymc3{UUZtb0?XHhgiF#MDXwsxr(yN_53BLmBDnNSO{oRK%o?9 zpCRK;PzkXRewzFL8{J_nDo>?7uaqREbB^$$&wr|3U*&dat*@^SEQ_=AA;QF|=`i0Kka8i+-JbSAIw!0$-jaVY4yFJgsX{WUl#I2=pSaCY3G7=*iF5YMpLef~-4_{a!1 zH~0STLKi1?I@kmML|qNBbTuwJlw>l0FV{)zbZ-a7ZxYlK=o8|zkGZFx&Fj9mJa8o- zcx84z((>@~65w|LU0gahpk5261nGN6X)t&7tfB9b#)Z8wBKT7RI`@{i`OD4g_Hx8E~$3-^*XtPt8xv zJASqT+{4AnIX?_O2e-IAoT4Vp`>B;2vPHbv^@~X(TP{o)l!M_HkNY+N-5y9-_+pVN zlQL{RI$U0y|M>9(u#JBo0Rx%Vb@xnATG}P4IhV&Cx zj7LO+Ci4AuRrl0B9dT3FepTZOuc&lI1;^^LP>Gu=XhP?Xj;R zXaXSyJ-x^+{^+9KXgYtsFB4PFE%i0bvy_0k!eU z|9pH8mVO}-z~5$JQ7#SQF$AcM81pll_bjCqhyi7Pp#)qYsfmD9XA9a}x+4GAY9?bF zWb{pnIJgEPgj@artk1kC@&zCa6M$Q;R`B0K_5T%n)m~B8{(KoACP*uC9e!izR0^>S 
zyhmf*jX9B)3At!wvEj`3oJ9&Y#w*C=g%CmApLv*s(8hbXRuso_pwnH3{eUW?fLs=v9vn?kezoj}+U*eN5(dEe!ZcSZ$)V$_*{lg95 z-kCR87#;@*oddmVAXEBp%N~&xFcx6j;YsHLk{J9h#CL!#r+{gIgL)^rVw0B7qJ6x#u2ca6MacT$U*xenc@l{&h`?mUiI8DudlAg z#l^ij-Zg>B0<9=SvzCv|kE$oZV6v`UUL-k2gUc-sI@eA`m7G(xtpNr z$H^@r6TGdNsfsibG@#Y(CNIW(Z1-0YUmpeo%ah#>p4(677|y+jHj`TrMEPjTe~9w< z(xLcK&ELLV`zNe<0E-2-qo=dc{9s`M&natV`*I@@Rp-IN#buao_`+LD*jscNd`4P- z!_RNCTqTqox=2-T;T_Uw{7}A6UEON+TqU3d^U#7Z!bJ1r&}k}M^%0l+P_C}dwrRs< zbZb%u|8EQjEZd*omVTeqy9oc@?k?@&|1B~T`*N{M$jFMyGNgx0*5?IHX+zw3H?c@g zCvj9u(Pto`W(7O}=w4_t1=h&l-#_rs2+^;P3KHKGi1=(D{+$UzT-XTn01mU0cAlGsGc?_CX2C}bU{Q#ApxI2X#Piq`Y@75JngL=ut1#k z8{F7%1bgXJOI!Q&_;_Jy2|X4Vd3i;}!=0Vtl9Jv1{i&}GbpJ6f=J7CY5}=gqSLkDt zAX-%abj+DUYT*>?W|iLBD=&~yBGOV2Ps%uv)`u3^C%?Q!NMnYJShs*H)5ZJ_D*k<= zvb2L`TD>8l;wlO>c85rQ7B{H6!`FsgxrnqIgfy^hF5?@4D>jjtJ{JiuG*3p&PP9vh z(!Hc;QN32uU}a=0TeRtxl`CQ^;o|@N1|+z|Jcc&o0U(X={r>ykFwi#rd;T7?lg-h= z_lYV*7L#` z_-_b^^yhcLn?UkOlzQ6OqmX$m5-k=gLQ;GFKh0%<735;WtKmI909|U>nV)`o+h{P- zT(Bj4!{Bi_I=tN751;$6;j+gYeB8KvTDAXG;UR}=4ZBVkIWWl3B6FWvCjQhXh6r*H zlwEVgAm-vi;a9K}7iWOKzx7Q0`O%Ki!&_HSvgtz%E*!7wH@cwz_!*MDf^mn79!BW* zTUq3q9|bcAgmZmjV`7FEaUJtXJ@bjG=1=e9hyw*k7lV#*#kTVKM^;5p^~z)8^K48M zty9Dw!3^tEn3x{ez4lNzxOLKmj+rEM%;h^MA`*O(sK~U>eD=F9+_d|y*{GaKWx-cT zBhve`=N)t(9L+0Js$cH<>gXf^<(Tw%UOIK{yQsQ2M#>dSY8R!C2Nma<*pWABOBfYL zBtyris3ke*=vadvyVbzmUBFPZ%GXJZ+;SHfda=J)T3)E>Z+ z6Wt4&_qVpVTx?~jnu;v@fcBUngc=Fn$3{kU^EsNa^9M7(oG396sCl6olO#e)<#y85 zxjK{`F4(ow{{`ygL@h{>B*>d#GlP$XHR|M~9v0SP^TTCP6e!Gk^d=~P7Vm=tbtVlb zlk)r*xSK1lEUc^)_RmgdQp?%2I`KbS)6vw4iQ={Qka9E~PS-I^>eA3@3b9(vsDAC6 zY@w#}sE}50isxY+7GGVe**Z|l=*1SL<`WoIBO|rJG9gyxdQk18=G6Wx={M0r&__?y zme!MjkL|g7^|b*`=e48(0hxe~AeG-AY*WuLHK56!XPiNb^>X2T^Svmu5j`NsdNCNo zVKrKK8DGS2qxbpS&r~Vj-?jGyZu44X9H0IEQYL*j^U*>PZc1U{P~(RWUdJYk6zr@8 z8_NUvK4%gEAPW#?)qyy#ssC#B+t9S~IC0Uhg~%@L+(sARst)L@D7pP99CU~aHJq|W znY*3u6VS=;j%}KUkjd-0^#C+~ESsxy8dBINv?;?g^<#Bu{wWSSt1gkKxhj(hhJJ&{ zmnR>pJ5%TOr79@6RlZngm~b#}@)tgz{}k}U-bCrmO&+o8S1OnecFpCwHBk@Kwq}yWE^-ehUQ|2| zzz=@e7Yx-Sa(!NsAAUKN6UoJ|8ASYDF?KMCzYQ(Z#x7aSu-`U;RsP!$!*Vn4<4Zh>a_tK?u&c3dBg)sf{S1L9qvafcI_i_s*9XD39 z2Q<-p?l1bd8_s%NYQr=b-JGIW9-rgsKjPuePj#O?pYJf+Jq2R8*_P$CBDu}m#YR*e z;SP>@-}7Qk4>!l~XlEwAPKQ#kc%9k3xA&>NXy-{YWgK=^GwE{;+LfdhIf+p6`us|2 z_9pE_F)h>u0O9}~I6c?^^a0K1&j-#=51$a+g%n6k+kUSo5c}0024!XK+ZZ3vPj@SG zd{y-I>q?19<0toBx6%By{UJp^&@@@~nkj5y$W?!ocoZAn6ECP!3&|Zi-aOcl=HZDF zi%am_X>jp~!lR-NO{GoicpbjC(Gd>v!-FLjHjPr{>~8)Dt7ko`Sz9@sU`MDy?rWPr z7WEq@U!MGl>2);g3r@BeuXS|*#0GHqLp#MC-G>HM_|U4CeHoWqmqDFf#A2QsqL=(d zXNo6-9Ug0=_4AE*_`z%6D{pt{*H-OKvzy3lVy^kl4iESDE3pCj78mx7cpFJ<$o6}u z*w3FOV`Du$k0@qm;VUy91cIh=@nphrcQ5VjbLjNmwyu??0WQ`iI=*AQvkltFXpJ+M zy5-Mr1Rw7W;~Emnr=DaQxn3=GpX~mE*fBL-Y+l4tU+iANBbz?zjO62-e185eNw|ip zhF6YzcsO8i{vLXCMh69=2(Wwu>Zi`qwC#tRs`sXY;o(o$-RF$+KEyRn`*a!|O-;k_ z)IA>RE;{_)xRHUWDaq7gy>iC11PCib?32h!vsp6}6UlQRV4ds~dlTi3gd|VqVtc^; zZV9nyJ%wz<00qf8e0@d>}CRUC? z+lX`39oe@<8YLDNC#Ql+LkW$2l#_qoOPGv5Mp24erloB;WR@y*F{?*M`}UvC#$ z+LJ#R4tvZjGdK6})Ku%lS3O5Zi;AaMSMbcA`lyj#6o*k&6pDcNtPyb3+_=DXp}xa{ zK2Ck^t#S+5iBu5u4vT3nkS$9gaWLlXO_X{iRXhz>_ncZ+qf}5(GM}1sJ=wc;yjc9bxK+E+j6?%9!fZ5sA9mpMHDnOx02pTIFoh33e7Pj-K^+QGJ2UN{(uc zr-zt~4E_1jej}xPE⁣ReJ!m> z2cEM;(DP(vZMZV04up~5Id71T4nFq%RpmC!i@1CraeIiULJZhk^6xDJMMv@3^f`?Y zmmi|kdz5*v@_9uIXB`y(mYB)YXL=mxhM;Gc({g=W*@ln0*Cgx!Z znR22k*9bOpVd$7l+iqYuKHD!6oSA>f*$}FcZ@Ki~j{V`LvE9!%iM3E^M+q>>C(ci! 
zuD)-1S2?#Zu}M!5lNNcCdzHW~%yYxUWd94fG6*B!usX+|-yRB!NF=*+emp2IElqio z#p9GrhU6Pmb+YRS-Xv8f#CI+P4sgAmxln7O_xB0t6niSB;fsa&B1^;xJ4UF8cX~N3 zfz7Xgo;XfPS&~y0xmwmAZ_Xs!pB=TM;!gT$$m9mz?O;-;C?;5WOH1c_bRw|w4mufd zkWN&jOmqWQ{4}@PpvvivfUyN^rD3l1n4|C`;<;$fjqPoVz!^~!KIn7f3o}PY(YiV^ z8c$Wk++S$&=;P=<2P-8l&^XEkJgWJ|yHIEChI*4+_1>Y3*Y`w${2e8Ap{7r>o+>#T zPT|&Dz~G)v@j2u-7)C+>AaZ_-fIF{Z@oPAem{$d%fA+OH%QssZR|DJiy;8P%$YfsZ zol*_yWC3AXAVFeI3)}ey>g(a<-yR>bXs7JOh|k4scU}yu8N5}vB2pyYBrVzFoe6Yu z$Ymp5{ag(lAHR7&i60mD!N`xs#>v6K^@V8N(o(AZ+_urt&d{yAENDa2PfSF7Yxxj^ z2}J;};kCtF>$K!7Ch_ZsO~04g233dp5@n-Xai2Dv*=x?!vjn~mK;%1!t=@tnkk)W| zh)u2k;%t<~M}vMDPdSu;D#R%;Xg}5c#^COPZu9=-`PQtSp3^)nXkYkQ+NY;qb1-7s z^GK{)(4Nxb@x!LZ58A9+c+GAtsg_G7Mwik8t<_R)h=>p6J>wTF`Ua~H1Zqk>Li89q z6$%qapUe$)a{N+`fUCtM8m@cI^gKa4T#+B`Jc8PVT-E^6gu}u-kkkCwH=b8&Aa-ZG zIE-<|I8Ei*6TLv8)flCad-zH^m$`*Jb+%BF#9*Kv3Oa(9nrBLecZXFw)6!j4F8IV;%J+XZ zvZx)XRQT3rUoFvsqsNHT5K_a3E37H;;-o=%K{HgtyefH2;d3Uun&8&?A&*C}hRx{s2gS>gNl zvzF8BF&i$g2o3_MX0jMV|l1-TvcRjX%E>X$+UZ9)$%n5)UYwY?hs2+FgA9`0nkOB zt|0Hh4|e332mXGqsJb_rdxVYS`ZinwYUz5~O2iAz@j^jl_swd_z(B>DMW@Zp1u9Er!UTk^@u zW4?B-v~wyTAf1Y1DgRfz2Bhdu5a~Vqia&+49EiUnehG+Ff7_D(aO84{L0%nG41NSc zFnKsTu1ESxfV{rV$)_cV93WVPHZF42sKz)LU0%BhSCWU@vUnNzWqQ+{S0Wl z^oOhg&c_MNhCL!`kEjAVV*dB%fzshi3b7UTz3$zS*J*1%J)KcHNgAw-tS<%U8lfO! zUYJ2uQPDOyDsOKpLw_P3GyRyQ#!4^tPA)xDqGHQLFfpm{>+Y%r6;p!(4EEVb>J!-D zVMKlYUSb_kkXycS9nO6VzE>JoKeBgP4aBbcl(g3SMYc8$4IY@J|IX(7J_o}}7T|od z9}yeFbW_fS#;^X%o^ZMilgCR?(ZQ~MteK)F&Mr6e^29`5e?287MNE8!C7{BGe5r}C zQ)WrUt^!x8t(%;n61r*l-;V&893X5@Z9}}oY;!e%@xEXFMMP8G81|A+i^#Q^w=LtSwvL9Bsu#X>-Sj-2MJy2}+#qhmySR(W~u&n1GG_&e|E zG42ditht5!Ov45yyoiE5@LmynjFalm9(+;+zJk2kom?Ue{b_b01df1&bqv%1mB;NB zJgW!)`ahscP^E$x68eH0z~HtFexfAO)EAEX0u*>SSGWt{=@DYV1DjnL$lrqG4^jVQ zkNLuZfxuHh?yigx4@6+*2!C9gDwvEt{uS`0C+=0?-tw|9G!z25>=C{WBYNto&m&z^ zns@IuwJV;EtP@6^>s?)p_%6I8^J}NzAz(ZHw}0yzGK_@B^SZIs_W$e%7j>ZDn42>T zG%71EXJk!MFZBxD@k{~$0=WLzBMhL#JKD1FHIfN&06~(%roJbC9sonQao~ICRJ@hs zT@%&iYzvS%2!tmPd_{?Lalb7EkENnI7QRy4{LYb3_ zVdVipa)MglG~b;8=8GN=|2r_K1T1LEBF8c(sr|UPVG)9ox7G8}?3^pJRip&Q9hO@A z6|e91Zmj1~QpL|yafEG@n9B2&ORzXC_ixW!lCjK#RE>paG^BHA%Wt8pX4{mFDOV9> zW6Zwh4;zvnEOMVL5A5+f2)Me8Pkz@P2zjuO^zo%!#Wfoq7ogSYXAE139`Z)r(9um! zEmnE&+VN>6y{F+)_(xY45CFAI8bad0wOUdUsSao=3TfmRZVQx?J?OJBw+c!uV^ zSD;&ihKBYSxkmCCzZJ!6pC=?d6Ickl6aY{M1s6YP({BDA+-8CjK*QpKt($?j(zb-4xW?c@}Ar$9uY}EOW zU{d1&0N`~gs3wZhrOwLGy#{$u<6`F?OZP+2nKn$e&FQNX%adn7j5ys|*MVXx&F?&0 zWk4*Bgw1E7t9@DF=*v?VmpQMqR1}+>mLMREJm9qGB7DCu#`5~K<#vYd*9Rr7L6@9P zacC1@sOLtzdJug_+6Ktq5^ae4ekwQoE@o$Fop|BTFz0_ZwuA1L! 
z)<8nr%WrQSlbM;w=c`ZxP!MN;u#Q_!{{ZezbNZF6vZlR_OUEYz6O5siNOvL%dA;EI zJjQGt`@-lk4j&aRezmpH{gai<=C+WRKdK`&X``~vR)+dEt&@ZZ^W9mXD}&(Zk|C3> z_vK7sRuP`i_BU`44;-_DT;R5ss=Tk7!u$D{oE;bwF`V3E?>>KXQ~E3WkDywFCH5Er z`a&U|ryJYRdiDTVv0v(?E)n;=({PkB1VUTyODvM38;Pf6LNSutRD}xda`NCj(S1sy z=NBLXCu9G4+o$PMI z8z6B%T96oj-~gXrc3kdP(!5^>tW{TZz7RjZcMeWrMASx1Q?~0^!G&tRk59I*)JokF zXYtcGculmVJLYrJ zjMEr`uOgod^<1m)9tKdn{#YnWfRF$Fo>G1*JDFyV=HEaE!BRpH%&$rX$Wk`KNg)HH zKpMbp$)hB=P+dh2$2{g)j3Yg{hlqp_PHv19Bl<-{92SHPmdiE#Cmtn$x2}yss9tyK zhlpWvj}sR(DKh+Ri3Q6T5r}=L;=T``v|DhQoo{Z{gFZ2tb}tQxy0B_xbbgEnYn%> zt>@*S24<;zdtzdvX(sD3u!sOcr1DkpWVvsK<%jYne;WVTDgKrhBy%nX*%bychc`T= z3Gr(a(MpN%eqpTQ`6*Jcei3^r#BF!(qUIMwV5QV>?IyVURXkfILU>+&I9;kqNTSQI zfJCt>*S!fe*WV0&L0Lk#kjd!mUY0YC7V@n$3iq7 z*xczPw+=a_BbLasGa!6RjwJ-Qbb*#<5~-i zw7c_~rI3>{DXz-1Hm5m*#440mwDu?vaa-Qdjw~9V7uv(pqr3~xa{~>t?H^7)SS6}^ z?sOL|6%@FD%jU7hkp0!U)70S?)LG;P2fM2)FfY@BT1474^8U_n^ZuL~4bOtvhuY)W zmY1rykmqz5vXU$e-IJ@M`5ZMiJ;db-&zVOso>qwIrt)@TJ%J7*76Cxy?2I&VG=d2f z6f7eM0YG^7pwf1h2_HX0poMUloApcQM;G&7Cp$Sf@%Cj;rq8y2aUmuL7e9*c!)5fC(H~!pGL7f|C}*PCGjWe zRKZxYySM?{=h42)6^|WBi9>`+Z!p#fmO}}Y6y>v?OuG>? z3N0sD(g`{{T`s5IRbL!T-?6sd2WJO-iZl3Fd(V4y?tzS{w!DzkMC^cwd2Q9ZPkOJ~ zNFwt6c`5C18AHm~m!2v|9!PCwCgTu4%5qy%r_A-Y&m!B2=W|WI9y!2;*uSkf`R1Sz z57n_fKr)kd_~90_{_K-OW5KV^Ti3|Rr36&aO|rfK-=KC!eBS5bFq__&6PMr;u2kWm z61$r;@tqtbUCl*RfT|crP_CpCxh`eBt|_|nBYXz{B2tF^*_1}Hp=|%UL{c`Dv-zzn^_^i(HclHJzJnC%4jId;Evt`DG z|DshTv(OtBvr{V}|H}2!u-$_0P}lZXUG2KJZfD@bY^goDt9@ne4ds4^`gSE zxWFW|Oja2Fy<*=Ac+H(T!Vh=KscW;bVOfDpy84*;B1x(1-DYEK#Joxrt0`msJZTW52=PwQNcy-F@eZo}W)5}21rRlzk6CAe4A^kH*H zKw2KaX!41ZjYC5U2fe&(%m(0SE024U9o^4@;|^>XCZwZ+`02C#D+|+5x!dBL@2cv( zov5^V{P^4Nh~@q-;O?ksPIpCaGN!)#vwjFL*6xEv5u^YXh2GhpE+eXkYLweokYYuKz?kOky!j5vrQL9cD36@Tq{8D${(GM^=#^Gm+O zvipg&TCE3-d=~#`t@y)inOUmC%^G1j`L$5(JmFXWUK=1gtM6yE%)CyuOF>9vL}m_6 z01}jl^0Pq|V~|dIVBjX`F>OwI0W>oHik2PC)ebr2IZ&%NufBh;p1=yT&Rb{q%`ypo8P<)*?Q3!`S^RzjG51K$&o! zFD&^Uk@~BNND9MJRScf~bL|2>h=MdKso-dz3rF4%E5ZjNHvWD62zA`{5Q0EOOYk$rXG6xj%I|TqI|7H z$FgIh+zSjQM@5e|Wc%DrIAZws;s4&hDsGWt%~DC&u4&(9`>$K7&)!Nb-1W#)i2Q;x zBA3Aodr+Mza*z57?-)MS^?=Z^rWo{9ZiN}5}(rd#bmc7iu2 zhmPqX-i;?W02LqU7494TUq7t_H4F`XE3M+|_%u^KW;3d4%hW29e z&bt4Z^#|F2FAH>pO-xNavr+*@aS7)pEb=cIJXTHEi0q2a%+oBUrFr0B@)lJC-$Q>{ zTYM%lC57_djhqq0O)QM@&bW(vv&BYj%L!r)o4ji&|0~>smSThU=Oz%nbI6n^T6WJN zsh4)IrXIjOiUcf9X32*KZh!B^5&WU&APf(p-Q+a~E^c_7@9f|cwaVWv`)fFVkVE7} z*slU&cSwD)hnJ4A7n=qzx`RVdkX6V5=^?8k7k-919&BWoidCf?!Xt#&HADFr3*g$| zU^+-t<2JVTA&}COC*|Aw0qemQ<=_2XAYL;gx3M{c@cD>19Q+R&89X~sDB11lL0veR zd92SC984n>TRZ<;*o|Q8GT}OdQ77}S%JOuR$Feg+Yo%Sx$wZ1$eZ-8jYJe5PPxu<>HaFiAGclx-5kLu-`DDByg_lf)5wN5GfwZ$G!^^3Yf-n`vKmy%5~m>?NMeUy8V1$aN&)w5c3X7tkN`$Xw^AY3?2K? z$*4z@X>P{!aV}T;aed|CkayN~jyg&cU1dEh91P#OA4%?SIAxeg_|@B!d4*s+g;o=? zqa%e=XZJZek3VI+(yCz9dJm)Kbb5_Nv*vdt1QpW?r&t|i=CKm(#;c<81hzZ5e#AIJ=ncYnP2O zp(x}*h4pz>y?B;<%%1s5+!6ZScO)5!DMJ4q+=ZJhSF;jzj@W36^Rm_Tg+ z-IJ~z7^0vB>A~R3-h@|^W!X-yoyI2Z`b@z3_ ztUZ78=Hzh3i>-HWX9pa4XXCTi6VKc69I?I;Ofn!E2Gb=aDX)XJ6939QvQGiRugqn` z5tkc7MtNsD&b+X){ju?Jw3cqZkzMofN9W7mO3`~GiYm(R1|#|z^auC+4tc_Aw#+P+ zHm1#H{3~4mQLb`koSGb{dr?zAO&t&-j)*0M_W>@c_amCT{QPq4dnW!}?1q0Qflyk_ zGJK4t7_^N(Nh$+x9H{%%dz^a9-T1nfg|Gr-fI;^TN(PhKJNlkSLFEdu9%wJ6chiVP zk8@;E6j_E%pl_{ro^oHV)zthR_wo^J>vk!ZHW#U!jdhl_E|>=A-@IyE&V(l|zhx&WwN zbeTkh#KHQ+-0CW$Wrf*5N>Y4MyewJ5rx1y*TK_w6p(SizIqK1G)T6BZDa2jIO3rO>ZFJduVwY)HtT6i9qE&96K*OP zok4G;X>wfGHPk*-&dz&iYnQ&e-oRiNzP|Q7;zj(r%&VZFjU$os3IAm&l=_5z=g$GH z?*egtp%0XRCo5fKODEIP);e*mY`-)bY&I$5s0E6~_P1D=nT3Re28V`P5^&XJEFQ&y z{sr8-h^5oo+Nyk=F)AUU`(R-t_R}Xr8ykI7j>xI-7WP6}LlYB|=ZQcg9|*1=A0LY? 
z0zK?(PjYf{CMG6*{TU!}*m*#R40AyQOZbaL>l*f7KXet7YKZ=E55OZ`O)l_D?+~JK zcfcAnsPd;IQd9d(ZPW&i*?Cu4$xCAW=Pw{XyzzSThby37Rh?iGGJ*O$LYkxHOt74_ ztEadzXUA`Z9KKE?|9W-UZh#L14Yi@dq!4Pzq;?n9mn=F1TKZ5>P-JDhKtGSwM2SSh z8_<1YlKhrY1+d=Zxh?Qr2SEXM#O#sB$sWJsa;O4dA^qy*f>L!A2GOHsa6ri$4U?bR z`*}-_4=2lBaXBn@11m;Yh20v2#D;FdG1#SMUPuX@x@;PzZo^vo@bzb^ZC1a1N%Z&* zL5G3%Tt8kQ%2EwF{ht8tT0JfS!E4%jx-kDZWb$FAgPR(oB^^ zh1=nQXJ?bFs?$etikDS>zS9&~z8Hih#vM2lR1&HRa&nHZU(+anqCj=Y>|L6Ho8>dL zPa>62j1^DjZ-tBJ{fu8FvJ5*Ux;J6)5lt>duXr1$b|SQAZ1a=TP{|alVwudw*F;{M zX(0RfI!=JwjuCn91s#y-3bSJ#CH&(wl9@FOq&KUFeGAGe4+>ui>THydbyDyRtWWGml_a86;G zCfj%P44!a5L2co{d?jV~@wVN$sP!ow`j_^AEVEsWFm#1P4!Muw(BE6M`6iiK@*l+! zM^w~41DwdXk8t!7U69OSAX^(=`fMRnKE5@QaT9|#?pc->By~K28D6N-jx9G&T_qCtBYhz#$ zeXBr6RS4*)n#@2K={3M2{PKFodyVLqZF?w1R}{<82(fwylzY19DIisO0Rdz0vQB#c zPliuv?T36G@o#>Tq-nhO;OM-It$|%%U&BU|Cn_cFm2|AVWNt(quaM<9dt!!ye1Z%z z;Iz1qS?L>^p(3!I>OO8V1>sY;WDwKzzp=-Huiy+D~%kSM=z;Kny2VV6x6`RP@!3$ zW`F&-LJk~Ip`g$UVuEBLSNr4L1+cgVSeqIET_XYy9RmXdMvSbi8IzQhl$so1_ta9H z0jjun@!Pl87AAf!$W~K-FH4i-PgN_qv4=aae|e%|Xqe)X?s&U38*xUDL6y1?ue-gK zDg2HClMx--1?4+=P4)y1lc)okSJDPg^pwQ(X$^-1BwB5BB#j0kZUEWqXH*FZ(b>0r1o0jmp{(fU_=^r2xsOYYir4YNppI zGYdOXr(p_@j+P?*%sJn}WBr3qL?o$}V^*Irm0rd_Ohe`fFN)@+v2l?*c$`lZXRh?5 zBzI>K(!xoYdi7~woyc}ZaB0!FSyurAm#Y*Su1MgSPe%QWt z$sg$mr}HbhM2W;TlDvR%UWHU(9(l6B=5Yk_8B7`l-Y^(YxgrED7`Y68s={z{Y8W71 zfCi_;#Kg8hLb$pD0`Rt}-w# z5}23g%dXkrR0r-al{3AFgDtO;064}wsJ&NTkZyJ96W)_C{4RyaHUs3~$hR;n$|1M8 z^^#O_A;!5odq|{4!PjcPMwRiyf7JOa75(G)p7e+IIO*DV-|#*ZT8g;?dD{q}hpWLc zsLHNbnTS`TK?;iw^w<9meXMZ)S(kOHas(THxoU)Qd-^0X2K3@yJ*jD^8=PTFM|8z{D0+;O=g zSipjA!hwqvuxNi~b13;bKkpGwg~yE#*y;7Dyz|YM0{L?-PpIv5NH( zo2HS3LZ-6>PtY|F0&f96K20sH3X3soVA)Paj4Uip0RkZG1Ol_^ycebI;RV{21(K;C zXY2zBqa%@}L=UQ9L~w90I6;SznK`c)!ULJQ@}KpP&CMCR?iosS(Mdh~qMxI|H~G=0 zF|4O5&LAQuy#l``+8hXoeR#V&MSCt!Kz^u#g0->!y;YN4E)5(7@d9X;fFukxMc-pz z4agE;$oT+XV+bJ^>N<-@pUn@-@4|8S;_xZXkA6n`oudrcW|_+LXM{F%G>dX@42+CK-g}1H=y%pNf5vK$ zc7ApS(j(ATKI3)iF~J$}OkV!AvooE30uRtuc}zkxHrm_E%E-71<~?uR|KCLamJo#( z@I$gu%wfWt!%Q0rXlf?L#!AsyM&K+Yphhsd30WiCctA`nE5fQKMjA?k!rwT5kLf7G zl5Jw3qt}nQplnIyx@E%|IC@L&nW)E!BRHW$PfzbrC^)0z0RaKip6}BkVgdrG<43EJ zOd4n-^}C%+pc5_6^JHpnagkQgHj%`6n(iGhYHhIr64N23fw?&?Kl*Gj9@gNsy&X>V z$t%5C9?fdai{D~tYRXl-Wk9{>mQzxaem+!f zjJ#7h+0GZM7^{q7oD}Ad`l`)=&pSbopUsLr{*KmQD__kFK<-5EHC ztP*ZF$Je~Pyr9uX-Cuia&`HG*OunQfY=(e*VZX%mUe`MZL-NoxKS3D_k{k>S$$f&a zVy`$^!3bi;?uYq#Yb{FENBj04Vg`Sqg7lerE`MdDKrYvnI4lRQH(y#DJ+RZYb_TLF z7cVZuBs;;UQOterT{%xYz$)MkUPs1j2Fj|ksneqnI-^TbnH;z=E*e$%@)J(AwbPqy{HajLvR zT?o^#tv*X1Eylj(HzspWX!=d|vo?_7_nzcHW5IC%o78k8fH5H{$#Zvmt?t|aY-1Eji<4+7gQ%_+HYkv9a=q?dCS^d*DgL!#;yKb1IOOtqC2i(V zeNoPESv2xp%89N>EoweR#gkj-udlxlcZj`g^LR@c3?0Dw3UsR$lrz9Fim3;*Rh)0$ ze0t*@M)-!1kh)^7>_K#RUGil7OoG9y#Ny62%T$>SM^4N?%e2VPPme()eTh;)+M6SI zuf%n8m`Rl4=dLVNpy9^1Ww=)Vz5sU$13r+4sDkMNG0ykK>OV>7 z-`QYN(sdCv)kfRQ$B3ab#x?V+x(v(Pq_;FBaLLr2Qqah9drOarlZxC@5!`J;IgeSq zY^ilq(rZyOsGRY3ava@`++Bg?`kn=vW4{GGTQmrt33K<2cn4jvyuEF~Pn*(RS7&S- z976fnMSJetg~^^g@Slj8hLj4A$p|WVJ@18qLtV~Qy zUFC~6&T-v1ZWPE#M953EQ4L_FdnZEQW5c`ov+~%=q9n(!8^NmK@D- zm_j+do{^zhsm2N0ff!rhm+*|;iG7#eR}it}-B=c;eJ+4eqiEO8{LC4|&4$vsom1XW zUfAWQ}E zFGAi%$|Gc=ACa#Ll5h7@oC{(w~J(Q#4q;3$k#QN*4r0b)ErQ>|RoY2cT$^t;SE+L^%?LQKNe+9$85%(IhQ zwt6M`=~Rbkb@L>o>>tK2n@&H+DB*NtZv7Ni$HoX|gFl!P+o$TU zUj-y3^>h74M&NnsbyvY)c4Z~IxVY{;MNO{s?+>bjSOpxbCj6OCoRD?U*4EyHY5sAa z6b)=lD|zL|4(P~DQ89P<{#LeCaP|TQgK6EzEzOSV&9iZHySfpEeKH!Le8=By4Hyto z)*jO6i*DQo>%q4q%zARut#hIym;N=5on-6TxoOSlU~7xb(~i`Gr~u;xo}-dSxIJl{ zgUgsGNcbJ*m1*^!rBVslieVPds;usse{xxyi;0LZyXQ^_TSQ0`z)%^?sZ;<24R&EE 
zX=yObxo5FY(Q2X)SAuU=5XdqmVoic3Lz#5~4DZ%*Yd`2)t`K7Hh74$m|o1%Pnv9B zc=1%%AC$zwcG6>gbBfS~^1*}HHk`L=;9Q%$o^$HZNUJMHhXhdDC)d8rt~|k^djqWQWJ6U6O_)&rT`nq0ck+aT$?l-pxpD>$2(u6Go4 zLEr|V#CdWve{f*2gf4pc6cv)7mQEr$X_22Eys&d)W6OWp5B=^0)thK%4siUsvnQG%+;1Gs=)Cp>{W?EHn4Sdo4)VC)=+Q z0w{?u3THa3qO#p>u+Q61R1-Mi^&GOw?gOL7kYI2@+$pLD=T!(?P;9xoEoiyu3j&bC zZ*3Tk(|Dlj^|qA*%_kD#3R4WmMXV;7zyYYey}9D8t{;8wS-y@8mY(rgSGReEdxSem z&Na})K^>kIWQ$oGY-~2b)it1k$>g3J>^Cdv_(UpZd5I%&WhA9^h$h|4d!K3R8*vTS{+I^&-x9B{HS#{H3d3^%3P_gs#1XzCMa4DZ4O|@V*t_U zXls|O5Bcf4EmQ%J1XZH+^z>q-sbE?zzc^P{$#e^2j(BoQU<3JLmuaTXmq*KXZIvo^ z7Sd0w9`qb^{P&W3)qf#rXI#Iy zjRrlh=IPeGQrth&1c$LIKYGCMVf;B+Sh~e^$}S51emA5&CT)GE3>l3p-Z>bZw?e; zT3T9E=_*0vO2%+B?X#gGD4na{j1vWSzm~+Q9vY`mN28T!33u&#yI`kto=~y776l`? zM})9~j zjFFj_;%)r{_>-90_Y4*<3cdi$7IL+}w&U|b>fB_9&vTE=U5bUP51d{SXR}B;M+TQ0 z8!9Z@0`3k)k1*9bC{BZ*8)yBeOzhF-kfY;5l(!nArcRCMel3apgAF>gwHzVgtI@!% zs;a6*nW06lEruCg29)*R4#b|CIt0W5z&D-w}&gv$ds#bK-gKPDNr)0>=|(jt8pn`K8d}P-jY8zPC5^6n`%{fnGpt^(1}i90SyS zXL?!nM;C+HjOX!Slu9|1dY4=m#=xxGPq#5qo*1;useD%S-i|i+8ZVaD4n@ly<`i|l zjG+9wW4M}mCQ6_6=`e_uRhQdzUaLrlHG18N7|f zA?%JitoV523u)Nv=Cn$>+sajP{o@@{&Hn?Rhm>P`G9xev@2l95;;#apbr7?~MBB(V zP0DH1b}+s$$6lc`CUa}b7VD);ZY7W>dsY@fiT7G3Tk~-**tC!Pu3LkXD85pUX6cPa zcD8p3yQp5w@0jFF$FEF((HsS#!_FJ>3JL%k-$lQC`BF7W`3Upn=f22haOIktn8>s0 z6GI9i5C|@=l)Sukz>3F?9WzvP1h=X@5oQ(^g0=f-?mb%2UMBr1C1`Aq6a?k-R>f$|Rc{lA6mOG=fni``Uc^eqR zH4~GPgwzw_<2!qL{50fSJi&D8{rmTB?(Vgtcgqly;OBLONtj?EZCxsS`t(CLLr{JL zUHDl@j*G)kQ|x+bZ#%RQ)T-pdXx9AvQCOy6jg=suoH{aMB#vkpF|c4d#$Ycf6sGGF z^Q)F=QH0$fRXj5zp`wZUUef|W{A;1?4(oxJAuk@HaEliKhW7SWR$X!*x@aSaO!!z` z?dTS_Gj!OEd`cz5{*4$PAMcjCHg?oEru1L`ZL$4!Aq*l+FM7v zLd^-Bk^?HkOJyZGcMtwNAy4QrKFYXHFwMh&>&di3b5J>%y{4)lOyCozgn3w88@4&- zOro4#N2eMBk|09DR0oEp)DFR(kHy#Y85)w000L#JSx0QGmWF)@V0~dm8-BO6CA%d; z%a_+zqkl(?ydObOi6ma}NS4fWx=z||7IKPnS5K?`R5qWOQABPA%`Xu(Z7(G1ev?hE z8D6ol=@92nxoha&+VQjNEzwA{d(QStitXWG>E$mtKBkOm9YQUN0EQu@D=hIdBM!jF z<**pBfWbf;7z_%y8Y6p0{<|>z8x+H5L;`c9JB`>IY9k%|^QwCZe&P)gJ}FXCK8=8- zV(pwi7(3UCei`t=QV|oWMxu@lT`_QNlt5vYd=%@(q);CSTqe8ICU1b|Dv#< zU@P7nZ_6lINPXffEsl3NrH9L0^fgss9kLw;Nhuvot;-i}8J62mtIPclRPux!pggNQ zw<%XxSvcgilDY&tdnN2$;0b#A^ba8174eXbs`>gg=)$Bvu5`EL`3C$GHf&c6#_VDw zDM7fu@-;NM!*^7vavrNtUy_G*HRr$IH=$;W>$T{<=WN0jvBuNiXKHUcO3JJ~O_7t6d-Uj0 zMSOmKKB})^1)2fC;5r~MknVyibTUXw3+R`)L7g1((aI4QAk#ydH-rr$f-GxkS6siw zX~7XQ+{q;6vfy1cg8$15KR%1+#}&>mOUZF$2R6|(Xa8k#=|S(cn^#;!v0G4DHQG?y z0Wen~Bknx^8vLWB3gBKr)(~>D#m<}%S4y^5b=cgO#*}PB9_^{TCEvhQrs3Yl;&;b{ z$sb&B8GgVvP|+==I3WfTOj~Ag%1g7O_~=SIWu=&E6w4QGxsn&W@mxsc%1oUwhtX2X zmOkfPP$N{)pNGv%Oiawq7NZ9w*n&^<%{@y=fvF9Wv>QoK?2L}J`J?pg*?({vx4ZKr zgqHks0y4#gU3pKKgf@_Nz+`T;CSB!YnE&}C?z);+Q1Dk2pZ3S|+}s!Xg9s}b=!9C2 z;QOtzm(JM^GSmA~_MbnN!<&k+wtH+**BBDxTCCt z{h7BkSRJYJdRnbK`T|;A)1&F_cR$8m8(`}Uex7h=}y1!yR^!JcccBtW3%2Wsl?sd2K zV4Z;|Q4sC^Je6rxeU?y<*=U@Z_&ihMX&M@7`aK4AgX zFV0WSI@|o+O$1Li>E{c`yn_6(GK3=a`1)Y`Rl*6U?^hure$ciIaNnZX7R}CFb_iYm zWRj$?8>mRL(=2zyOuTtONxsXf#k={OZv!z}JQkruGxTR7i;)70KEbS3Uwwb8orPL` zYR>K~9_&Gz4ihVW)d7QbdcYF*SB4Jmyqc+&CrII%AMiErMOvB^9TIM$tufykTQ7ufA_S?OaK4? 
literal 0
HcmV?d00001

diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.plantuml
new file mode 100644
index 0000000000..72f61432aa
--- /dev/null
+++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.plantuml
@@ -0,0 +1,37 @@
+@startuml
+title Figure 5: User creates a machine with kubeadm bootstrapper
+
+' -- GROUPS START ---
+
+box #lightgreen
+participant "API Server"
+end box
+
+box #lightblue
+participant "Cluster API Machine Controller"
+end box
+
+' -- GROUPS END ---
+
+"API Server"-->>"Cluster API Machine Controller": KubeadmBootstrapConfig updated
+
+"Cluster API Machine Controller"-> "Cluster API Machine Controller": Enqueue Reconcile\nfor Machine
+
+
+"Cluster API Machine Controller"-> "Cluster API Machine Controller": Machine Controller Reconcile
+activate "Cluster API Machine Controller"
+
+"Cluster API Machine Controller"->"API Server": Get Machine
+"Cluster API Machine Controller"<<--"API Server": Response
+
+note over "Cluster API Machine Controller": - ✅ Machine.Status.Phase is "Pending" \n- ✅ Machine.Spec.Bootstrap.ConfigRef -> Status.Ready is true\n- ✅ Machine.Bootstrap.ConfigRef -> Status.BootstrapData is populated (not )
+
+"Cluster API Machine Controller"-> "Cluster API Machine Controller": Copy Machine.Spec.Bootstrap.ConfigRef -> Status.BootstrapData\nto Machine.Spec.Bootstrap.Data
+
+"Cluster API Machine Controller"-> "Cluster API Machine Controller":Set Machine.Status.Phase = "Provisioning"
+
+"Cluster API Machine Controller"->"API Server": Update Machine
+"Cluster API Machine Controller"<<--"API Server": Response
+
+hide footbox
+@enduml
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure5.png
new file mode 100644
index 0000000000000000000000000000000000000000..51718ba4015152441b601bb087c6fbdcddd6e028
GIT binary patch
literal 34258
[binary image data for Figure5.png omitted]
literal 0
HcmV?d00001
zTVcAx{KbnG=lkQ9rr+}xV&ns?7FBg?YinV;oDwgi*}b?JsC9E?ncn2ZOn-ZivaCg% z+~&4DFUew{i}&NDgapOG&w9^hoG-6=bR}_26JfVGg_c)VSk%j2o-en=XVsl1Qd470I;f9$Y%v-za^?k_m>r z2@7200*+5(6A}_Uyi@ObRfW48Y}7bz&1NLJZ*OlWCpb@5+1(U_OoXHB`&7$EB+Az3 zAm}+c!x~&K&JJ|%blSAbmy5x9d3n9{75O0E8qyLAUp#oL9Ji<{5k-fw@be?OVd6rA z%Qf;-H2#_)gMxxmsNDMOe5;lGG^oI9e(Q$QSealXxanR7+A=cG7GYEE4GkU>Lz6rFV;aF7vD}u2 z*ZutbhVDqx;5@cksw^*eaNlk_NS*Q@PlYMvX*jh7Q95i)1cxH$|JnCFOL!AbM0QAf zPmjf5x_^8;nWm66JXrKkxQOAP;dkA~lJy{(&yc6;A(dX+Tm3#ki^Z$^mV%63r91HB z>olmbuI@8WT+PVXn3$O8=zccQ*m*8a&eUt$l44Af9=uBQc)OinG|rdPt+UkB)E0~F z7DiFpavbW-e9|I1l+U!b!*=qIMQDh>s_G-+VtM~(eiCBJ!p-DWnXElqL{uB|r+eSE zc7hhcGixBl*Q%6YNf$*Q2B5D*ZM6RWLaFt8j}d@s;<27Ymp*fp6J zsG1>5c6`vuTIagAf^YYFV1bD&{CVC2IgC#nCl<$QF{>#RjP{L1tXAVktjc9)6Fc+s z`@H)blhO(PB-}lZA8)(C&W~5J^Gixg8I^O|K1uLqknk&2wm%9Nd5oPaord-^8NV}U zHyss`nX2q1Rsmn!vPK&g`TAnqtZ+SI->C#qcvMkUmG!}Od;X4tdJPN#wH{!| zz`b+l&P@82{sxQNbBkNCj4Oi~ayxooC}n84qP=nwOpnmIIS>*}(b+hiB#8O4qwaYA zNcA+4Ixqv(N^2V43MKN}_zmsk|6+x{X&0Cc{})u>f^aWw!D7KRDaXN5d*cGna`;NHQ`qI(a9J zGE;WKdX$MTUZ`K@@_8D(d1yZk!qI(TkD#4`pUe&e>CJ`D4^z7S&-K~P1kks& zS@-lF>P!f$<;61!{LF87L_|)rP2MW`uQjV|y{ii$`{#E6djil3*3```EiDZIq)d{0 zR8$n|&70;E75DX#yb5|90d50+02~NOwmhCf{y+RGLVjvWV z6O27C-K`q;*5K3ex2F(5{G_N5eMXugn{Qbs*wIrP=`PuL1@Hq zoYYT+;pj)=7<$5*B+^jJ(4rO-bHO)2`ct2|)lb7OG?LfO6Yzq32oYi5_OA_|7UKgh zNf910dLR=sV$;-f-|A$pC7zI**JgV+c2ERDfgfd%kZaS@6 zdzcS=`k?vqd1D=s5YrJ^PSbi|Y#c8UGR}<`$6|Qi%%v_|~wlM|gXua<^K!oz?NS+w4)A7kk=E=#CQkM->m>3~3)GwY#2Ie9{?hSKw6O3dg(W)$p)Fyv^+j{$r_1m|5 z=V!u^zry((EPj-kdtyA`l0QVl9#DiDpx#tMLEte%ld3Ro|MvGm7lsS14`=HIl5dWe zt#`Kgo0{Sw@FZ+#CYg{6?1P|6;NhNrRKyDfh*dZ4plQ5*CS2#acvzndzS{z?E z)riyCb|-dOSp-f^NK(>uyBOWczJuxBq$Dqv>z(0@{7%*v+?D~wnZIzx>g$yOdmk7G z(?kmm9&cy}`y#%$K@ol$D&>0m?ek}6C&GxR-f+fP`dg-^Hg0ZW!&y_N-({EN9N&ga zKSIy13|WeKG6=hM_`?52NNAn*46Ejk(JUqUC(jfI2bk4IPWHYFp6+YpsyRu04GksJ z!S%J-TRDU%eQlp^X-QVhpv5NNP-5jPWW&*?staf%U?!ehT>K*ctA8_uM(^kqfk27# zdZx_3V#vM_}bGl+f z@r(U&IooCU?yvITU`mhb`AS)gEHFteczmXO(!}y0A`aWC*D$d%m<~gZ;ia6`v!>I! 
zi*<6I0`uMkX5I^C^~JGqb@M5K&E-MQu*07C*diW=hRN#4po{f!em+~HiCQ=6y<}Vr zY6aTuRinm_sL!tZT(pOx>`HIQ#4dKoC1~SIv?6BPF2>N2A>IK8EDUocUaN-hg$1!D zDVce@c-m?b9D$2{2hm?{Y3iMKQN4+ilhgI11R?COaeGDb;&cb~gpsL>m0Ir!9M8(Z z;f+FJlI~p&VgE;d&KFa2;_(kKMk4~BjPU6JX(RLBp-0_U|Tuk=BNg5Z>ecU zaetL&^+yH5`*(h$!5-d@4~(5ehM`=NCd0U##3Lr%(XlZr|~&HM(5`KR8c`+B@t3f zclLW`Dx8sSaF|Y075Nyg=hb@>Z@*Ac`Q(?Hn=4Zt8Y|X}B`|^Ih~DmRvny?}!5Urd z3FCYZ%Z!(I+gl#=QEQ0j=Wmvvv9wcr*B!odx|$F8MZ)(yb4u#g@p8JOnd0Jdu@@gz z)njcQ7vm8V6AL((sF)?ZsA_uDAQ9$$%E!*lJ#!&#u^pT!P!9L5zkz)baEC~f zyEp1_s@*CZaWBX_s1EJw@dVm7`UwaG$ML~M95G2Otq%MqnhM=)rQK>P*oSqFR$)i# zs!=|{QUNry_g^Hxoc|uorzCH(KqvQBQ8v!nnsUeNCNhgJcnUfOS(vCDgMc|@u-Kka zzPlI-JACjORN}8I-j>8=W@7AbTT`N)6)_MUfpyxJgrI)5RvuDQi-Tq3oe`5_E9BTnNT$iW>?5Pf?V8L~1k0e)Mft{MbYxh7m* zzBDY1=wM^r`HXLFdtr0D+;XVY)Mg`JvzlN~Y}5xo^r6tiNh|p$qhiU>r-hjUg7FMG zyPdAqrTh{!!l~P{%?faM;zL2IoHT#rgmBu+aOw3AEb$1wfx$>vrhLHJ zaHhhy05XjB#FoRG7%!CaU%%gBRmoe!7G~0RAm6^6Ui!>W7g5Ibg|F$-kwUI#4(N z<(sa8Of>t`q`nlb+KkIBNQu8O^ZO3&v7<}%uT@4rf9c`;%{qPGmDp*cd&x7=E_l&u zoh!pzYpld|-mk)?+!z^)+uDLmzn8r|H#RQT)a(R`NkLF$?x2Txo^?Z_K(t0_7WYDM z%c9fHi*1LbwLYt5)(4U7T5!RnrxKUX@BUbWSIXoB>F%y=a{|5cfdYbTFNn8=f$jm4 z-JD}v4x%CE6JgqHt_Ns;3D_mTUUHy!uuBqzgz_MC%v(s=>WUj7SLHnn`8pvcQu0Eg zHuQZx@?}7s-hp@6kidw%OycG(6cEZ`lcJn3m(ZUr-f%}UT%z%{xSqjcIgwe{DE z3Tc|AbfmV~khig3d=rFT%o}UpAHJGOBgx6t=f#A%-|inn^+duXVu+`13XTpIh$aAr zn=u4+EF%Cfnc}n5+2=0_c|7Y~{|Ey40G56I9Eht96pt+TLgHcs?+2+$9 zwuSa)V_X2;mEU2bsi_I|)-3{j=#fb)C5QBCUg2EinxJglV?n!YxM#YUDb9^Q z)87tO9|_p8|&mkyCl4LNI1P%i>GRwoHLXIJsoxofW5ABpR9w!L4^xwWHg5%J03-AgBOOT zrsm$>p76<9F$&t<9M!^x#zs*D3@~P`uZ{}$$lNc^97IJ$d(KvdvnfSCj*g8fbj^Q} zxdUfTsL%!lZ5}iYovdeU%xSDh-$SP@I4bJb&!1laABY7ar^Rp<{zCK6P>FtL1U9+Q zn|gz;C>=dLQTUU-A{jx4(ADErPSY_RqnzKFZ}(pevIutPnBCTIt6!9+&zi!`P>-~d zCnR({R*o&!$6ji0B40Gl|F~Wi#`QQ&f)AUVk>@nqu}H?ck0m`qIZdXU*uhWNm{vUH z?qpl&>>B&5Xq?u$V-TC~slT7h=?eF2tNE|a4Q&JLmgVH^1L--`X^=-n=G#COFe(HA zc|%0LoeJN(sc*DT5K&#}dGx!ck!Df$Fm;w_o_D$75xX^~k9pIP_)GsU-BuYbGSx!4 z$4kSWCulb3`a+Bx+Jo<2IT*^+J+6_>=R1!-tK5AV#YV}*v<4n~xy57_&`HT}#BiC% zgop2~jTIjrI_l}o{r>$MXuMXjA|fIyHV_~q`}+Iq!-2FFpj*2aNWTc=2kDdSr-3y! 
zZfa`8R$bk(+%CJnC<#Aush1d8E%lID6hqUbT=v(Twq|c$9|UqR__^J~%IVROQFI9z zVWZs{Cxmsv?)h7o(I8wk?H!*qoKI4@Ki@v~dOs$G$;T&ZVeWn{hxVQyT~uwMRIo-+M%H1i)65_rYVMu2A9wtQbIUwO~z0a=ZLm@;HnfKg; z)u*`}ja;7c+)wMj{QBcuIu;lR0ti-GW87`3UJ-Gy2m-I;0gJ}Zj+Kc@8&_y00O%$6 z^HW}PoX-U^Tvo|@TG6qwiNf%NlWdJj>(i*^rvv8@yxCvBc;M%!TJA?;vm4mvQx#UC z1@{GDJsS&bFzwIH#hbXxystK;_|B~zZCZr?BrI?4C7w;B&Pp&-V@Yk2zl(2SWb_oi)XQ4=A@FQ$rg}13alCltocJ>_qYJrlBNX(6_rRZO^q}qO&#r^xj1w$oqM8p%Y0E)GhJ$Z z=cmVC;7M9Yim_dX+|ipcOh5ZMN3Qy#X?OZ~7~e0kZ>6qVRGd%kZ(@YsOI?adK*beQ zhO72GyUo7Sb5cqh$||!M+L4)(GS!P9Jz~qyowMLL-*wzOWO&-@MV6r%8*#{H^<&Ri zLtr9K%D{kDMR}<=u@@T=1dODPbaiX~jE3AN3isc)0ZnjyZ4I?ClQceAN=oW@y?pxo zcs0K@kbDQXeY-7D5iyvf-}+0TGF~xQ4r09syL~@IPRm8O-Rk!)d=XZ*u6J^7D7&C_ zfqX8%=Y@@qzozO%ud+@^byT#SBAO0AG0v@&(Vs;*vo|H*yu>odtJIJ=AGMTs*MmM3PTLF$e72?!#{p-#KM?l&)PZuXi7 z5OI9W6$gSa5x;#-NnL5_les(4YWwxuMj#!_r0QB)vJ$?aQ9|V;;kE!G_;9J|cePTJ zX1Pb5dNj<6WxwVpi-ncBCTD3`&X(t^4dEl^dcEf*EBqh85Vx?+CM}H~N_bJ}tim8@ zw=nKrOevX`kZ=kd!A6oN#Z5nO{uGy}e!B{DHAO4rEMFG(J5oe{<=7#yrZO$hLa@R2 ztWx`&mLLLCq^&A2;iXHk+)8DvckG}k_npW90!9<$-VJAh1|JP=wV_bUtiNoe^Qv%vrM7Xg7^ z;Lo0UL`M_(Isqtt{ra_3S`jchL4v!FnG%u(?9F}_bgzc?c4HuxpK4DO3x{k1Nz{&y=zF~r0=5sW#;vg5~n2^jt|fEb?ew zD7GRKG9~#?A|!G$Zz5btXO6<2MeFjuy?*t4@zJ13O+i*)pVcK!bf+zyHaTo&WlZDD zF5ZG2dsRQ3V4$T$@ST&e7d?G{c^?#nw(LzCNVBmlO>@ogaliE7-8Zvtmuc^kz4_eA zQG8W%(#RM7WZ7|tO$!q#hH=oH2VUA zq=Qgfots|Q{-J=EdhQLxxBsP;muwRvh2)VG;ksr>MhN7!wovp0=q&`^W)j|i?tmnZ z|CBw*isw(=1GM7H!Us@EWYS|a@fAP{lSyzS2S(oKn|6#oDm|F@6#*@1ATw2heHbt^ zGkDF+vzATvi7n&ry%>6mZ-{YrGX}l}NE@ZHV1it&%N3CVv|@Wx(5lLg2+iyL`wYuc zUiJOA=ZGZ>u9d#r5=3-#nqB05y|E3*RzY`3*ZBG;zY>O3ke4|x=}FF4uLsgQE1@d8 z;ZnXgcmRFG76!3*Dkz<$SRi@vv;SxsLgLZmp0U3!?A-(N=V$s!m0tj&s=LDzugKR8JWL3Iar=c_TPD; z*rB@$Yb!kcY1|X!2i zVJ;1$tYJ-ymhopRwzFL{{LnBTWwj?UB-uPZegaILxRdj(jCH zhHn%LFZWN3dfzNbKsX0>>Yu5L(^<|8qhx4)E!JL|*&a3tIJK_atv%)BP;9?#-qBlu ziXbE=-X8ptlHz$eZY0ZJn+xu!<_$ML;W?PdbvZ(D+Fmx@T;jN!U~sOlF*zFd37vi( z@>TUm#G0UmfqwDvJifQvdc3C1lME_Hrh4Q!J3ATq`AOUa|5W6o8I<8%XP=brV!j5H zt4Qj)T~wXJF!A~=EIFa@Qlo=@VoIzi_(6Xt17W?)Kbg8$-O;;b*km-PHv;o74();gvQ^AHBTv4Ge0*(*Q&i!Q#YV-bso4?n1|0Q{6}1kQ$25HVR^=zU={Ri5!h6_+B0o7Y*>&MFGy3ZKJnbXHA6CE!ysbh&AhgQeG@wf z)Stz|J4?L>`}%Zwl)m>6- z!ON1t(2{d~V6dyBrE4ID)GW{Ie4T*1f1u|z=r0lc7*#1fiuS2pz%3gqu{`W?YozeQ zUCX`_u=Rrry|vC-7z5(q7gVJfbtTKfUH|OuS7tSIvgIsuU)b2bIF6359&M)JVg^Mx zRi6a~bLo>w$3}g8UM3?HeiNDaDuU6Khdf|XYy2kBv3>7rv}=>K*11rUUq!_vVMVSB z(^T*)c+?cHmXyXC+iaH`&(o3<v&;(oK%=NjHz0% zQ_#`5YWD{uY$qo0sI{{FwNq_PnKUH`C&AB8i@#7TRNf2RHcIsj7n2aWre*mC48m6{K z)-ZxNJVYzo;D!kyJ|D%t8f$XjlK~n4h6CA_Wnb8l!{r{lwheog5yA~!7IJ1!bDb)4 z0|}qjLNC9AIMoAL`b7;R zwW7a!_`hEr!Ki$AbbS2El>3dw)iq#OBLzs#&#-}k{d`kyq2?@|A2GIFmp5p*L})P% z<)L#|x3g4y1U~;)mrptFPk(ffRh*!J9q{YocNmbzQn%aJ&mQ?$)z}GPfe3smWo23RA;1#!I?%>oS>1c{@uT zHRAb}Y59n`eY+B#kn5l9w5d3gt{SJ7s(M5D-=rerCT^~Fyu6nZNie{F$-=KBTUBp& zSwdv+Y|&k&>#!V$p$6n-_{>)d+J7`%29b1N#^YFv2j~vVJjh=|K4)a&1d_N9T0I^I zRHqV%cgRPA97lgjzGt=Jk7j=IhZt3kt@Yj1mSzg0&;Jj}>-h1KJNgG$$$5BMnm*{9 z%Gap$;6)N7WCjM2l=ec4cw3xVN>`{*%S}`WsTi*FyfHAId}4nZQq!u~GZmFkHC!h$ zm@EN_o9j;zpKI~YR>~&$Csl*MNqaF3Qs&p}5p5wr$UpX~cf43&g$qbLmH#o+)i&Cc z?>>hw-eYsV%5pl8e!<3gQk3@c<>!a0|4o>SW@9NrEf}{XI=@c3w@Jhn1%TurN;OWK z*a4L@i8o;8$MnbV+5F3%D8nex%%wo%*q@*NU7_t@Zx_@YBXms=#z*XK}&?JFH#NGrL z$jC?pX-PYkR*6w?ad9yev3qa;48XYOk5(MuO#|1i)XAIs43b{-^;2VZKYnzV@E(nG zT8CnjgvZx($VsWy?eFd>wX#0hAjBU@M_H2V`wc+;COy+f1Jx9<<<{Fz zeqa&zT>C5xCjN_nzdl3xC#VA%%Bp5^W5o9qaxhA%w>iLxO|yV;Wl zPfNOXU2hZocsq8k$lH?s3FDT`pDWZj@3jPy_fiFZ{@m8nL)#X`X$%F0 z91WEMXuIS58;)*L(4^_wP{Wbi#!mw}-q!=nz^N84I&RVjkD$jKh&2Y!4-1)v9b;_h z5^K}*VOfD&9@je}Y%aDxp~SfyC2@daZ747=J3G>vR-NrZSwPQD*?ChXgHjk*^s-K@ 
z@4=fkZy7Jx*P0s8yVO((I$t-}*1B5GLB0KX(nP1Iyqqp8D4=66YD%dp4P7IHJEly)`F`*XTz4#sxSQ7M*~NEB*CJGcRa? zwSRWqy~xhic69XOU>fcljD9vyhYTJ}%+rnP&D0bqB%D>N7Uun7Q}XT)SY2t;G@jq{ zc-U3sT+%k71s}DZ{{KLly)27odapEJMCExyZmOSe*EAqSZ)IsLQ=u)x<_gD;ms3ZK zM~d`zGbefww;u+m2U10t9oaxYnQaDre(y%=MU~S|dVKsOXkDX8-v0f&Z8*;7XCW$J zkbfQ?@RW=zU0Cx6l=G0=sN<9$)AuCD;F?ob4iZ9b$ptH+Pvf1&_tO%p zqbqo8>_gwq7{YVRs+wgqjtAQvde3vlTN2KUhaGOKznYyRAFY~}Uv7+6RUIg7LZ@sU z9qkhWjgbWf1)z%-1Z1M13+PrZtE9q*g1)K;tgo5wE4jM5zJ2=}C?|)~FI)VG8b=Tx zK78;a<5!M>{r?S^<^Kg+!+udFriMf~#Zs}V_h=^NZ?3%(vY3&ohuwdDIBcoW(ezv{ z>FoicYbqnenzs5MI(uNzboV>6{S7gyh%8dQIbkLa__?d4r6p)ZB_~gmiRD^|fZ4B& zDwLWid1rzyGY;onBhZd*W3$uL1bDGw-xraxi_fHMJpqJ>+b^gN*8;VPk^Tn?C_oQ4 zu>Z%7gTbx<2Nb+W@cw{=SqglAcPS#I%Xfw9mq7X#b=jumvlRa00CL*O{;7bGu+P@5 zd09Dw(hzwkwwJ&E#p}PE8wnEsL%NY=Ey(yrkiSX)O7^@0U?|?tpG&K&hPjhLR-hB= z-$C_LKqaMr1h0IjZZ2~F2igC^z9-BbG#h^3h3ynLYz8UaB6yho???(XzKN&}1caGP zLTyi{ANx>IG&7aRKmPALh!{=A9{KFoCC(|_az}l6*>H9Ricu|YA@{WCKm4^o=L6z* zEdjjR)flF#a!J^EOIVrpTz{Sg$bn~gT z^Tq$t1;6+!X4U-eQdI#Fn{+P#C zLqf8$zXl$gy~lr)l*^z%VwaSVl!i1$sz)L2<>Lk-jFgq80^YCNxgL2UiI{5n2GY{vgkcYbi=rgz=i*R_%D<60nRSGyiF_Gs=oTAD_v{|!6vyP(X}_YqbK9fl^qS87qljmKqqH$pVf;Nq>Ef&N+Y4=88%Ptky;_Ipr@IOa z>yy>r8yh_<-WQ6cNri(03p=yTzM#+0?dP}mf-d{>rvQ+NNlD{?k#x8<2QZiYp7#~T z4*kK{VIc88efI2`k7!UlaEnZLISGub@{Cs+x zO9|FaK3*waqyUj$tFg??%)q3uNVqhrY!RbwaddJCJUl#aEgT#ixTqWIPHZ-(1be{g z1{+`w+ClGQ{#;*oTOOe4al1HoGc+_r2Hs^tlInMn8f$u?E$j9E^E1TH>DB&`{s>7( zc8SU8_&6OM-QXtZyErtnIz8M1R)>v^4T~&rBzr(nSnGNOjfbIf50gP+KQNt!t^WG; zw?6?);UQwkOpC+JD2fmx)m*t(Klas!oA-t>Q`acO1PXOU3%rQ086DD}Vv3^|g+SKJS)R;Fn z%`k}WEc~r7<-WFAc!Uzq#ei~JyJ8HiDXI~)=!4h*M`fz)*VVF>bEDvwc*OPx0K4M( zxtW-lI5>8|qW~T90K5QC{u>q&wJG)uV|U!i&OWQy?%GPd3d>mtGc9K8KHd z^2pQH#?72 z=C0*cYpL`7?9>Zo%K$Y;*B|w^|1&`36T6<-)F{icsSd z)>keG#0F`2H2rR#l?5{X=P$y)%NCjeQWFMg98Jv!Ko~8^z5Oa^V@{~qnHta5c2=AYVRS~ z?t6jsk+4&~S3nm3IL1bpiNY6=TJAHEspaJdY0{BA2WKY-;2^}mU+9C6 zkjT$;bI4yR=hc(*V_wqAJ1L*!g@ecRiO`(Pya#?bH6tYB22w$heep(d9=7kBk(yU= zv83pp2jxL+l)vO=E)?HhSR@EIu~ShsfU|auje6khMI4<`OKREk^PFBYasnICXX9%vQ*- zvgWU?ttC(KAS3irAPGMWL65eq*j(~X3{AN*Mw`h0HAXSypA^NZ^7XIwZ+JQ*->&m5{d(44FH&%h zV|6O0s%Di~odjw6-NB&_2Ni;|lM_RD2Sh0gROp{}T*0z|^LaHjHK5i4S)0rJ2P*V0 zOM*cq(!P<~wVZ$rd{o52{U=^XuY5EhRG}+EbBNPF2EJ6UA-N(xsKFhru5QRq_z+fp zd;POa8iYmg8_+HDcURZS--h<{NUMh?0MD;EHQ^W!<1$xVX*2z83=t>~uJQr!>s-mk zJqH?dmHSb-dE0vATo3fgkC`bq_E2yU(-~S7J3n7P7)qQ#{BTE_bC-+oAUnLzs{@CCsO zFGVF6oS_cikcVZ#MNuC^(9d2=`>1977A~R(Jndwr@e@Gk1|F>UpktB{Go2xI`>wLG zJ+|57ZYO5QbPz_T%LdDe6K!U%Ix>0Gf?-%Ft*&A{;aow_E2x)0y^F_mp4Zx-Bm z50e^^*zYew7%#LBpuBls500mR%Vj9T=jRFR?bA?OqqY}vr@kKj6t%Fl?EH$w>%}K8 z=*($5F9COO0CWlTp6WYRv<8r^TiZ=|NF|Ka6_qj%SK2h69H5!5@pRL8%i(x1T+Gj> zhR70P?mtF;shcxY@+D^4<8NbQu(uyFmAe7wQ0}5*W)5-X41y7JzFqHcJeX~s>P^J1 zb8)y=s4`g2S*fd@k3wpdXHN`>5)o%*lcrb>#$di;&GjRER=SbGJ>63#eRKd z=VY^gMe+&f(A<)=+ecBp?v;92i&$Buo_KXfxH*GYSPzXfu_Sg^xg~Z<31?VOyebj; zLkU@hc`k&(7-BV|Lo%k?W;zUzl;W51_rvilT_T3Ku5C+~w! 
zL^o1sFgsbDn<+O^R`rsAXjZ}2hO@VQ@VhIzt>#Q@qO9j79u%6sB2Jr_>^|7sd>;Fp z7g!=(28V~Ln?Hunmj?-xKb)Us+iz}k9?2zWaMQOfZ!ZXemYg@gf37f<5ikoB6&(-A zbL%{#K%qE)_lQt?BnR-BWYg`052~A*zuY{)^pO@4 zyT6~Ns7P(}vc^7F3kIVxhF01b28D+w*={rqP&-oaiUp6SqKBXhFMw0Qo%c(#`{W}@ zxQ`7D53ujr<2=HzCle36`^6a?R=f7<>^KkXqn9YW1_15ALDgHJG)4D?+1izQa~S<} z+M(s5ZIM$~@BH@Ep*P{~K=Fnz!4tEoiTLWqB1bFg!pC#v(;xYG2yHC5xozx|x&qfn za^J_Dw1*mjFx|WUlI>k&`-M z5D$K6bG-0{yG5YxwU}?(Kq*FcU?V5+4hFM$!&Y1%Z#_RV|3>#s>X-Yvt0P>zcQtJl zWL1{3m7ip2S?_#`OA)Ug$zC$E$eX__{;25)m*sRz{H_g&2TGLoH7UA_4ZnbZvR$$~ z4nt?#WoV%!E_b~Vcm$y^cJ``LqZu7z<6j#VwffQ|-VNo2k5~GV!08XqRBAlwz@P8u z@w`q`J6=|fiZoj{<-1w8YQ_O zqW2)?{b2%v%pjr`S8SaR-)0sT3N4L{oXeHy8Y|yJznszngvUtK{#4vryJ4FBU55a&r9- zSG}4Ca&VX^zUef4_%;m>OdpNmb}+2}QN}FiNuT!NBOLPL$P(**XS}L2D)u7)@}d2D*E9(z@U~izBx6x$?hwcx%*g>QC=Hn4LA@Mj^Ub+BE;$<~*)3qR0g++*W+DD(aNJGGG^+?j1w zLtaN=^bl_voAfYw~b?j&Tu;nCiRQNFKqb6{CoS6O|()~f=8&*+MlRO%l9tlax zLqXRu9|J<<2>?)wfq#*0Ggn9l>JUqY*^ohX&8l3u*lz-qn;Gz*IG z8>@W%X#^wp4e-VTzow#F#@o*ThxxwqJjMsDrmqQ01G z$~bg)t3M@)TKsr0ygVr>p|X-)U;=mr_Fmk#-VTh6Afc|z{Ag|M+>1L7&!jS5c)D0$ zS`Gsvt+v*p)WjaZjgxO{Q`5Ve+gk3|LDMUj@vQxp?&b|YfeWk`2JauhYUay0g=Us- zr301gDJ1bDGBgn=*7jWZYf(nKKPt8wSDOb0sM^I*B-2DtzJnD&L;^YX5bW*GH7g@_ zOl#T0-}A+%zVd317R1>0YvD#CqU#tBHW*w zT4hhE@?q_?l$5(Y@ghfNY~+bBYbD}K3Ipj$eMY^yF}Xe_{;@q>1|xY&JgIM{P9kCY zajxb#6~)g+wzVadk1W-%K7~h*`L=LXQE~r4QA@U^i|vKmD>m_QFT1)hjVRs}GOP9h zjDL<8N^u&|VEYb}ts^vt)Gi!V-oazPzTI`*ByG!u>K#UiWd*O`Z)~qd>MXABsgw|aN zWV1iDyWBD*a?qQsyUX|28DA!7s1)=yu~VsvL%d<|SIlD1yzje;ug|qMsxIFd`Rsh> zGg0{K4B12lUQW}+gcxGVs0C6IzvCHHY1M+)s3hLb@lu=_q|&0vEev&TC(4v?$olf& zbfmrZmqxH*nB1R1LB@d)MMe*i#wIuMr`rg?k(4rZdkU#DWveXUGsalO&b16LvMsa) zv*_vx?6GfbES?-VB+Dd|0!wVw)73S{Vcq0n*BEop?d(KleQGjtCu55h3DNq)o987) zGr{4*kyTlDaBmP@+u|3hzgBGE_2C1LR?Uw{-PO^p;v~_VuWyfJDpYcre?3@VmGD;R z6*J1jIKk{b+4s<4aci;v7&Q=ps7*jR%4V5YIbT)nFPH#d!xsxE68gKCBcrGCUmRp(!} zizac|>?}Xb$Au2|%ThmfcqO40Z2B$IAv|Oha!}{v>X7;j)XB1{>c^0wLhII|jM+;+ za?LwLgugb%21T5U9Qu<#cE?uR;4b<;;Km{0bw4qu*VMEEpg&{DFj&!3nJD0G@IB!4 zU%=-%SRY4O_K^Gii?c#G>y4<&hrB$4{{AOpUescs1M_5)G@f@bMT3`P+2|%NtG$(E zGNaf&84VvNKY>_70L@}PG$bHkQ|Qd*%vpGOhLSNmQM=*ra($MUmjO!qX~S^tMC3II z>${RfYdLfB6=6T|9hR7~;9Hv<50(-&(^|eoO^_jVr;`%Do)6zt)>=xGNf0J}3emfh z`Fiqv+%ndN`=le{i84I#bzTUhAFo;Jsq71I=qa-)*k{zN(#tD^8k>#|mzw$}7KeIy zS^HKQr>33)T9t{C&9QztbHz3BK>J*k-sq1e8*iUKixb{EUlG+#sFRmio!hbFE*ygH~}C*Q1{%9T5)z%+O1!_C>sVkDQ2ufbIiyJ*)lUvk+LO@y~p9$J7i?bUfDwS-g|xTbJY9& z{(gSn$Im}?IOlcld0o%zd0*G_&S7m@jwW>Q(5_G09VPZwtChb+QT zP~zt0Xdf4)pm5kkPIP><(hcceuhu6>8vY@>70s^R-gn{p%R`ib3CC)i`X;;TR)KP3ccGW{S;Qdc2_X2?_2Ck{o{0Oy0`x z)w1_FHOmL0?BfLaKPZWD-g-LXDR@X9I<6T+CF0(lp>^f#q!8LKR&aei6)Hl;>ww?mshnzvoe|((E#d4vW1fq(lDkv zIFv+yMgHbZ(XCs4ZEeVhS3y1M%PLx_AZJH)>g#h#2UUmJNT=`^{$a7_Z@VYvYTE1T z3$?4#ZM)DLbbc#$|9GXFkPO`i5s_J+>-Tpi#l@?%5qAg>JTE)r z#8tSt&r@y!V-JrF@dR-lu+<>PJtd?LyKFc+_e7?}*B2KTRdOskex=^Uj?ePX0wV)y z90vX0b$lU`)sn@Qr{&TVFTAOl{66V3Q2R;0_eJJP=k}z2yOwKB($1pRDO?6+h;$7| zxf4pelT3l!@w>6T*QHcc_ARQ)<%#HlEu{>Edmi;b1($4={R|f|>@r-93|)*vtU!g| z_3cSzs&nruXGA950#*YYe{$X|RVk|W$QUbo#KD}QE$OxS`OaH`ZT(>oCR4GR>~OGVgV&EV|`aCG2@if?|^O`nm|FC0`a9y>;+RkNUkR-rm`y z|MZ?j`U?8!#JLcO)D>~vDAVa-s=C!4w`y!|o_xY|=`&$+Y4Vro5z(a>_;kyQb?s+T zfOn7~J__Y>Hd6z!UtckNxOlpt;E?KNj%u1?0yO!*fNSZ;#7LyK_xq!-ae25P>P}v) zzs3JsQ;e;LR*;&{I=SlGx6=ROxKhX(?os8NE^{c$G+d&xl80V=Z>)kp?V_%PF1htN zdv#`&hm)0TiSOTrWi;J<1Zg~VVvxk^evRQTrb9QEIM;0Z zI!m8rMto|Tj&^7X<2CNj2ATTZkChTE9ClZBjw~zXqbTcKp%JGGQf}Y0zZK&hzGRDt z)4owwXEf_EKrp9Wc}pZICOFtTBH|oz8A~x95%pvKs0T}#OW4ByDJQaQ##z|+)ozbic?z-z~(63RcBT2mgKnn#IWPa8!L{Pm%MB$ zETGR@su)lwpwc0ExI=MYKATw$2uzW(@FJh5SJC<1O{lxresi;mf=XxKj+D 
ztN3(hW`whe$p@X#h%S+Re;D9c$|f%L`RdRs=JfpTjqYXkgf;}w2(>78dDA@Xm}Uxx zd*5eeG+m%z<#y21BBV*P9D@|{rx+b<=~T}w|JG~%we_IO^oJRVd+L)ICfiDdS*h9f zK%wHcb+v`*+q30KGPu=&X7Awf;H3N?ie;7kB1q|K(`^o-*Q&c112uMV*gQVu@4Bye zrS!+A7!p0Y_|!a}2Tk7XM#!)8Utdp%^xOBc%Z6WV!tp{xUc&#p`SYzCo$X@ZcT)PJ z!Rg0#Jxld#)z#3YiVZygzPu`2jro3}4@=J2lRR2(fe{b3$`oHu_b$@G!}i8N5_pGK zo$7f8G8U~|m)*Ipb4$d#+lxfy6DQRkcgRuV#Ln|I9Y4hugtUpxM2(BVnhzRZn_$!H z=`tUByE@#jc)U~EJaO+Y4y#8!L8}t)K1tA|h*8TG2usybVjz@*_VvEVX&vz{GFc!i zowS}OHJhr-JepmuZ+{xG{ND1@6+2XcM|+Udc61oKrL>X16X(o;XS2}i1G0v(+Z2)> zl47c=QBE)UWX8(9{k_+dn0d~h9~E@wlk8w%)9C&DjeQsdz`yJL>!3U*1aqS!9{@Co zc{+=N#l+k@>H*9QSYD>bsXf`n)A7?{tAFo>m4eoE1teYe<9Z5QsPTW zGRlFdERFY|!;ez967-fvzcp8T#MqR}Nln7S*eC`I;vuBcA-U1_8P!t3v|nmR=dxdE zX5q`zab)Wlq15;lS%#f*s?*A%)@^lQ=xwuv%r!pyQ~^_ht|h}BM>p-8ls8u% zAt_5ME~@Mo3OdgLiKsp^LM<%j^6|W7CU})b!wQ<>dPoRM#hqZ{rbR!Ph!FVqOo<&<3&7FpCTx=R-nc zo??svUa(}Uts~)mk5n0*x18@GF-v+$w9lj{_}0MYiX(~7GBwkv|3#6nXU&L6$DqnFa}3B-Qs(=U9(gJXYiA5t_NMm zpfiWDMS#w}0+iM1d1d*jv;D;sAHByeB}CvzoTVCMg!zD?xP&?{3bzcEmq4@!GbG8s zC-ntQRhmFFGkqcZML*}0Qan7jb-lWu8LrOT>9SH3ELM4BDwim)j&lGiS1U@%3Ta#KAXA?{u=g zKSc7_wFJ?=Dkiv;bnSgR4WGw`FvPl@-#y&2%KVDbd!SqejrB`c>lzxXV)-^7E3ZU) zh$HC#@Lymy2Y}hVN^16!)|whosfz0_;!YdsIM3t53yNI7UW~hS#H22QK*|s2yrh9T zCi|BD1F;yU_3CS4U&rgs_oNb@ihiOyZ}*CnG?u&yP%GODJ4>z*y-V$w+BZRG71RX< zuKUo;+EDhi=lQOOv&!iM(&TQs0=?2xgRGcf<@H4kqtM)u1WG)ev(?px4f}&F#ptdS|f(y=2nv;l4 zcVV*0+ddF#n2Zc$N%GQ43;jZo0G2Cq`1A^`<}u)_)zyzhMWa7mM!yGd$JU8~!zz@> zv8A3~+CKp8XqHv?H55zOoE;hc;mQCdhJk264dV|r2GUkMssSQQYXYJcnE%oNDT&)i z&0xZU@HWmasV@C6e>t_-&l-NnFJCme;kX4yXhLq!Yd)9csJ@Va6Rj^2N;iOzR&Izc zMvTjC@U1)Yx5=ispSF61?6IU+wp@F(Y%bZEutgm+o&k)-gi^;0?@R0!+5P4Xpi-8L%Q?UE z?9_g)FX>s_x#Na^S_io3r=So_w;41I9A&!$+VgHdYAl)-C|kTW03shBSnv!s$(2EO zaiuztn8(%7eA2t6nJvfQ1D=)Y5&WaR37Hg?${3BX&C_Nj|x_{PFdW-xe2c{6I{ zbv8kUtTzgwyqaZ1XZ>1zN1B8W^@=OErfE*kmAQ$@OsgK~&l5a5SQZjw;LBqKDKICU*i_526U{%z=GdcAeWOeC6s)YQpQYdY z1gDXpfIH2{S99v@TopXT`Zh99nBz9GMny+w++oah*NvR%b@I5x70@2=*p!Q(n~#;Hk!p;PlLN~ zqK;yb&>NGq4}>kUvdqB&yv1l4ayE_do@}h$`tSOtCP|qd?Pc7Ghf?pFN zLXcTlMy3TN7xl8#It7P53_XT86=Y*;udS`^?CcyFDF=CQKm?*)8AHsH&(G8;LG9qB z*3$z61KL1Omp&O=WeW0B?;~^UZ*dhAu2A#4{rEKr>ZN4mM;FH8jVFQ#gF4g1q*@tw zGAx@gq^-4%)o};E(nNCXx6q-Jr$dGcsGy=D+xvmm$%m?&>ZwDP79JEJp&sKX7SmW}V4djB%YB(DSx)S2ihRB{4g2fV%(rI$uW|vd=W_xs zndlT{V^{&Go_fu~*s<~R_#*?0-XYm;_>19eJ~t^(R=pXv^=u7~eIR3T^Qk41egLMD zA^Le2^^GWyQ(XSL$*~Zv`nMQ3QBT^!JU`naU26^`3p-#WHkvlimg#vuMTi*gcs=t-Ar`S$li?=QwA9iH5+N>bd+% zP-rck()~ZG9>P@lg^v3h1~o*@k^g2#y0-z5X2E_JySYx?g`7)>#c;FZ! zi9nnJ6-hMX7Y5|c!thsi3U#Oj}fHO3Ed} z2=6P>6pWm;)mIqRYTUQ)4oLPc7CwH&xGkMvlM2cWN+EoC9@xApWTG_W`5jPqMd}wk z9uM-&Q-osfWz4C5YkT*a2OfNy?xhvP36SCfq>-lxpWL}~=i$SxgYCsYIw4f$BR;;! 
z&D3gskX{K&;Ez6Cp@F!Px~%@H7#L8qe*ztSNb|c-2v_Mbqm@!Xf@t~P=}!9v79aZ4!&?D zMZ72IOib<&4y5E*Iv$uNXj$I}cer zg&tTE)A*97tIIKl5~FZo@6m@75W54a% zgsv>}-gaC*9HC&~Hwvq{Hj9_jf6pL?mYlj;OIND-CA~e(>?9(#$U)KS4c~>Eq!VGq z%j1{Wpp6SG$b~2$osY2<E6dAm#1sPddgnfAvZd;khB<97C~Il8)uXT4 zv*g*v{7E0uE5+@daM3(JTZ2Pz z`FL4Spu%27`!e-4ZbALhfmSj(gNl)^Ck#gP$J5ylIKQf89koB!U}S{ zc;r3VAAIEgoWEAp+{Vgs`C(@94B4QXWb21n(Hiw9bP<9bk67$ld%5%05pH>ul$4r$ z23HrWwyTUl&Q4@(EG;#4dKLSrn%+F0_8`cc@%8r31L<|3V7LlVrPgl^GLDOZ;)Wt0 zpPl81x|)VYzL~qkLu=XkCPJc@hdOVDe6@1pMQyoy)Lo9Z`;Y7d^;W&q8t=bVo@=B- zjO6JP6kU3xucXus+%hpD3pfSE{=;VG9H5mfzGj85F>oD5>LNef)+l8q>$y~bU1;1U zx)2T|kAS?vhC6ShAS!=#RWq6^*LHcnWu?-!0UEd5+k=!dA+`0~!;@iQ<V!=1w?bBl}2 zkGBIW`{?*lHDgm9g|Alvbu6skUFo#}p=%g%!s1ld6Fv%M1qCZmS>?RDqE+PXh*k&UnVBz-3#OZuh=z2KPwM~K@(e3&6aLAH~y!bM*BdLfSRHtmF5ozeSFb~_u< zp`+(KO}^~>B7pn*BLroC>dXf#QSROp@FTOEK^L~dQLH~un|B8x*(PdVQ(3HR3*BPf zJU*hb#|oD_e|&)v@26;s$wqToJ%7+LX=zxir3Y%vbf~sL@;JaH^M|_tBOsATjIHff z%~T^LBeOm^a*T0=xYMwHbrQ=`v5DI92$#DZALo7J#*L3XK&fAyK)9~HUXfRLJ0aWi znB(VGTvWD(yf-A*JEW=MK z4MWI$l+eP&yuqN<@A$bVg8ENn7`41&_RA`K-2)_*wm+Z}vr3BdFgo=Q^JW%q7L%@h5^w;DT6eL;91Mgs_;{vLm+N*>~Pu`X4@NH)TG@3b;n0QxPTl>I{ z_UcuImoM4EC1#yn#wuM4eV^ZbV`G!lGhsAIX*w5O1ijR&>j$KX4$) z!OK^CTSs1YOJ^O=p}x+{fTC4U@e0c8%qDhcxLxmY@e@VJ^5}ejS6EmGvb3Jo9^iDo zk;6MPgOpJ|{d7lTP3$^M#;dNvO!r?y&by0>%Y`5PdjG6mnRvJ=Q(CJEJrzJ26|s;QFdt+j?MHp<56 zE{?wfYnwDL>UZjlg<;@*ROj$%sprk&T+v_0aUKg^oP9jSTHjhe`uPmG?e*5)EK$~e z>uB%ek!Zvlv4kw-RX6sI$hhVFfI&qCe&w6#(^Gs4_Zx+e!cHC|f?)EhrV(;;d)}U> ze5u+$+m)D{^W#s{1$4!c#Xcsa^h*10J(5Zzhmpjvnbrt^h-vhr z_2;`&*4NiHHFIAbA9yL_;9w>UcwEOsPKpJ;L7J^oj(eXGsV*mMy)n!qf~u6u32jRx zMdYdEDk0x2<1*c2#28I}kV)d!Xvc1?q4s_lALElapR#K&jF2O&_K5ZkD5fsF*`(w; z_ZtIQRC|7H+@ucCJ^**q+~wP;{}6GYO!N4ztr~pGr)Cs>%n!&3vs7egdpbKgTn}uk1qz>l z3P*6YiA@yfX{wp+^A)xk@*l0O4;$bmpXLRMU!Ab&4gRxE8M!F(Qppxpzl2mcDZIYm z;K{(`>_1=^9eC>4_e-prR`|hQe6=}&ywy5DXZ$iL2dKN@2}bOz4nl|SzLxy9-Fcss z*5;;O(xz9EF*EZO<@Ffm-3cY=P(Rw1wb^xRZ--~9xlI#yz$QMoOoMu<-!Dy8f+uX9 zT>k}DYnO&Hb8@I$cK0#uUGjnpRud2}L^T5hG?=wW9Ky1*K529w2MZ~`!(*G^X%a>q zy8s9IMEIccU#`qWe<=t$v8M7?Qv~~VAMKH8z@r&wu4|0e-U}8YiwNe8*q!XZ_@>!hIz_44Xv8;VaqHlJAx=1YqzVyd z?jyJt``cZ@WhtD{`EJbnFZRDS8LEKtp>O$18!QI_Y9j`|YdF?^_ zHkePJdApk>kIr<0^csGG^he74*F#>!NRL*OHVgoRv`A+cwGz+*400f*L zW%|&8YCOO;sij;7X6<1I{FVJqW-aP!)Wf(o*gA}$;vsV>o`Z0not3on>UlYXg8Lc%^R055>CvZ z_XzL{Zal#>yCW|3?jHHzYUN>olS7e4n>i{qwP61NsAk8ydb?OXIw~p&6^wQw7X?po zLa>AcULCEAd=OwjEMldfKfe%lgZ}MZz{3f+9qoY)zHf@Clm6Q;tW`~ML~WQ1y%>F! 
zFJX%KkJCM%GWFv`Zne>EuWjWo-Fu_36&;%wof6De*X}aCv@(~g*!7^tFELO;BHJHEVJlOr&I z&L>HjltfBV<1TN^ymn6iBGwdVTYJ&ScH zz(^VP*1)fxxgVV6VR)|e4X{N-zF#9e_s8jgm*;S>?ucS520!Xx5FhxXzxznm7GvbV zGj+sSvF7eQ8V6@6?2m85gl{;1ecRdl&K`uZjW{^quYX^H?`lo+MBm1|#(5WD{%6Sv z5H^}RWHTLXHT7fq^0!5LqNseE%{X5IhSy8fOe*hc*M{_v0JTgPRutPS7n29d1X z88&#kX#DTjU5(U*kl-txa7gJVxQ##1@_4kdrt*^<+a>JQ`_LHGeAIth=w_v<+dmOV z+o*qShyV521F9hg;e@!c3K##52(s7=g7#)I_4qARd_#SSCL>|ZSD4&^7IiG79ls5_ z@~3}3QmrucV!5QY!@^f@zlc+6{N|PFF?jq6GbVqVJu*aRKIr|W&f(z|(n#pDM_LlX zD%dB;w!=A+C{gZ*XcrCF^@*##&pz0nl0KvdLl{n|!c+@}4D~ip1AI6}rns)#*@!nV!F%ZGRNV z`Ke3rQ1H3h-odeb4XgIk0Pjxmk8I}Vy&m4oEwE0>OVmnIC7pk8ndVQA&uOJ4$c74uOD8+y$4PIqgq}G0H{u7R!p|Zg5bXt;(yh0chJ$X(2u* z@jYGP|1YX&-2Q!81R+i0gr#1>SoDD+_#ikB}zyJ)B#QpA; zNoOw?;I9|yDu|NZksgZ5EUar)4iF3ho(^Jk9M@teE6U=b)8;Sm=J;A~4tEiwP*l8H zZrd63K*JYSRw{vTajhytwq_|1nC$4@+}t!~A5zUyQB_ucGY@!9;MI$O5=ma(4PJ{8 zaA%F_xUig@94L3*Tw4PU6I9HB-@j&nb>$p^o6tg}Q%XypnE$GDbp}b~AcO(vkUXIc zDAzuD{8$r&oXb!qC=?o^S2914abQ4g!Rr=-$E|P)TvFGo03YXHEl;uuz?v3-Uf0ZI z4aU7Xbrrjq2_2|n(tbM5|1A7^^bvnL%uU&9YRhYGfa;ndH8)B$G~Q}gQ@!vLPk~Uc zQnbgZ5Cd2V=?G3GdCA6eU*}Y3pqj z8+!SX@0YCYEJD%X?kZ45tW{z)k*A7C^YA;w3;i`P&{S{vt9&p;hU(ZAu{S{gZW0?8 zh9DQfrBPF8~zZvZ7ney679bg_WMXn#Z+h$MxfqeR>B1+rBd5&ytW^FwfZ+*L^z{GGH1kN zOQ3lNm>%8YxA$|a9tHw{dwWY`Rb$^je*{6))Q@1mB$5M6=Dk{+E$9c%?lj~5htc+}= zMV~$6=HeQVIX>71n>0yvdzV6hekWZmf|Z8?^0^J>?dSIaUn(#)*0!Bv{CN*}XNFvYP0wp65 zlZX5Z<3t|)L2+tr`;M2ROU`C(IXe0lkoxP3>*{ac8) zv8ND)yFn$TrHXWxdl;J=B9Wp5EDj(cmVZ?oMuazKz4!{wL1vGL$aPiCy}&CCHMLuh zt%MQUXA5vp50*d%2#;y&(%@j2y**D6N14?bD${|KNU+8po|g>@W|Rj42#GX*kvIlb zeE%Q`g<6=IfuM(kh)70CO2p$Yn8F|mS+9{1%@ieYM6@+znv=KRb>{TX0M=g*j=^6z zIOa`Q4A7k*JN*aSo$-M_-T~OZ|5;@J3+4bqzy?!)G237E0}J7BFd_e0dvN|DoU=b* z7i{-ax#_pd?T^IbX!ohH02BMD?{L{Bc%kQXy$JWC9nvBBW{$JJ{V}4(;mzwqF*epd z%MyYmpE4h&Z9n`2J~2#6!VT1h&Q*gZTmR`!f`Gd>PFs@A6$1JF z=ah+g*EiPQ2z1AfiNOAWG#DzT1`G(c!zrv$(k6P%L-Bj^ukO@mvzsi{JbC{h6yJE4 zR(!D<4j^xMA{O@6^CfJw#(x>J-UQZ#Dz8t!2CNR>$F8f`PcZv%#2f<0apLhJUrLb z`KP;QToD#~yE0;?9p|<7A2yc|_y2;}e`Y~s>=G8~{o$MN&S3#4hWELM=`~KgepGWo zmLDkH_KqU_c@?xltJWu8Dnro z*TD_50KH*m?(ath3}B8Y`?I@CgSATb%R^#J9Dk8${Bne}uTd{G4~0*8_wx;RQoHi74)t$`C>6%AQg zSuHIZw77~2<+apq8pUuo=sw!~fBcDvPXM#2MOX_NBUJ=beO^1R|Ojq~LamwSWw0|6TjJ{M#H{jMT zg`%p^bX|iF|I;($tnrcIC(}7hMf-#TdvZ%*1`r{absZ2h>& zm?Kb?-)Ytf6j{^}mKGN6mVDaUV`XJ!jfLAehv0JI;TS$UAj6J?mW73>iilJL?9A;+gXx4C7)*mp zIEll3)XIE)Tfk`qDCBkpXBj7X;Hpq?t|98pw+RQdegm8WyZbS*b5@bG<8je;@UN&T z>Tw8Cib>(sM#GPzOUxwwCj<7x(UeWu*A(w@-1$s4pE9=uC=4>+Ycb_9EDWJn3}ZGo zHq;Pt!NK~brY+is&vMe?5N2j(A)zY3ihcbK2PKL?+KV|z60~uzu?(CzePNIR;ejTb zHn`&gP#c`eo>(gmV!r0!$mWl}ro5KW_KX+DQ=8`NBXs}l5P?T9$Eu|^nxsZ5zJP%e zp<{IBW%qyW|Fo2xL2Z4}i!cU?oy!SfKFJG|n1~uYuiW>AJ zgX%wvM#YC3H(!gqCW|pq*wyy|U$IHC6<_FLXoonk*=P0&2elEEi@sfywy5A=KmIAe z{at6n`7=XksSjC=Rm)?2lF(`KHbKTJ=x41i^6sMUs>&{haZ`Kc4>xf3w9fZ(Vu(K!%9+{x zA@=7G0JA2sL-jwVJBt{YCX7%0W4X^qV7VOZYaE<2UI1&;!5G2i>X%f@2JIGacJUs+ z<~w^H*06Kl0Z}k>dzRM`EV=PE3AVKW1OxY@XN?k_h#^{)OeSNiJoyIHOZ|#%3KmU< z?V6_UWUvdm-0NJ@DnK41$(X%kGPFiMK2~Y+jbPW{*Kj~O5J!@R*f4yO(H<`0dCa6X zb8m5FAm(o2kB!;CmbOSkOIf-8kp`fng@th#x@CYBm?i_KPodx@zln)EVJ+9l$iQW^ z=U{(*)({eXJzTLx)?j)cNlHXNf?xI~inB%EiD38TK~4&@sp?aqiT%%mcL2Io4^{Xq z+`g_d{bJ$mBjyiWM#@Ht@l8a=>{jzZ2%qG(R#*hN%g4j#Y(B5{_4V{X;)5iAettgg z`SYdXum|KCSvz2B5EB!FFBC-s&T^!51`mp(3ve;FU&D1 zLb);H&0A_~L^sPI5o)z=U=%i3I`rnvaBT6Us((jjb1&1Y^@(~-m-R4T8UdP;l8R4) zGV=T(4c$x#eSP|V1wc?Pb*B(fIoR2?wX}S*n!v4MP=@>j*ZP#~0Ts)U(NSH<96vau zroa&<;6f7gJ7s5e>=}u(+vOP3K_|NzlrCxOwX(LBZh6MBZ{3{iZ_AEK3Y%>G5|R1& zk-urlW3hWGBcLlqgJ|BqB;NalSNj(>VW2iy_G+JRTa3?O;0#~dd>sCLopZgN+$ml{3 
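The Figure 5 sequence above reduces to one guarded copy-and-transition inside the machine controller's Reconcile: once the referenced bootstrap config reports Ready and carries data, the data is copied onto the Machine and the phase moves from "Pending" to "Provisioning". Below is a minimal Go sketch of that step; Machine, BootstrapConfig, and reconcileBootstrap are simplified illustrative stand-ins, not the actual v1alpha2 API types or the upstream controller code.

// Illustrative only: Machine and BootstrapConfig are simplified stand-ins
// for the v1alpha2 API objects referenced in the diagram above.
package sketch

import "errors"

type BootstrapConfig struct {
	Ready         bool
	BootstrapData *string
}

type Machine struct {
	Phase         string
	BootstrapData *string
}

// reconcileBootstrap mirrors the Figure 5 step: once the referenced bootstrap
// config reports Ready and carries data, the machine controller copies that
// data onto the Machine and moves its phase from "Pending" to "Provisioning".
func reconcileBootstrap(m *Machine, cfg *BootstrapConfig) error {
	if m.Phase != "Pending" {
		return nil // nothing to do in this phase
	}
	if !cfg.Ready || cfg.BootstrapData == nil {
		return errors.New("bootstrap data not ready yet; requeue")
	}
	m.BootstrapData = cfg.BootstrapData
	m.Phase = "Provisioning"
	return nil
}

In a real controller the failed checks would trigger a requeue rather than a hard error, but the order of the guards matches the checklist in the diagram's note.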
z<8za^C6~{G>;3lQ>j+T4kDC6mw?1tWH;{Y_T>?nbr(p6gM@egHYTDUtgLK0tI5{C+ zSa?*_OLlC5wb zA0HQ2R}e=F3ky3sJ_#G>?F|VFOI1RHnCR%}$bR(~@qgEBp}m2vEx|4ZLJ35hVYcia z2v`67k=W!_^;;E^hi{m$HtTFZqo^P!6`THT^)Dxx6h81^Cmomo`3E1+;hLRpN-E$J z^f%E$b!UitC~%o8Wx+NU-JmJR3KjwSX8)L4HPw!eHEYx!_q&z>RDkt9kC+C1yy+ZB?3 z;OYO+if85z%OwH`3qCP7aQ1);z*PPro{pTiKCmCfhAH{Jw{XFPgb^y%xy@kwIv zz+d30;0F^BoV^+RgZU=#gZbD$zXOfJ{QN(D*Ew{AAPTY561`e<2lEywG3fJbQ62aH E13;xV)Bpeg literal 0 HcmV?d00001 diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.plantuml new file mode 100644 index 0000000000..f0e675d606 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.plantuml @@ -0,0 +1,49 @@ +@startuml +title Figure 6: User creates a machine with kubeadm bootstrapper + + +' -- GROUPS START --- + +box #lightgreen +participant "API Server" +end box + +box #lightsalmon +participant "AWS Infrastructure Controller" +end box + +' -- GROUPS END --- + +entity AWS + +note over "AWS Infrastructure Controller": Watches AWSInfrastructureConfig\nand Machine events with mapFunc + +"API Server"-->>"AWS Infrastructure Controller": Machine updated + +"AWS Infrastructure Controller"-> "AWS Infrastructure Controller": Enqueues AWSInfrastructureConfig Reconcile + +"AWS Infrastructure Controller"-> "AWS Infrastructure Controller": AWSInfrastructureConfig Controller Reconcile +activate "AWS Infrastructure Controller" + +note over "AWS Infrastructure Controller": - ✅ AWSInfrastructureConfig.OwnerReferences \ncontains a Machine + +"AWS Infrastructure Controller"->"API Server": Get Machine +"AWS Infrastructure Controller"<<--"API Server": Response + +"AWS Infrastructure Controller"->"API Server": Get Cluster +"AWS Infrastructure Controller"<<--"API Server": Response + +note over "AWS Infrastructure Controller": - ✅ Machine.Status.Phase is "Provisioning" \n- ✅ Machine.Spec.Bootstrap.Data is populated (not )\n- ✅ AWS supports user-data\n- ✅ AWSInfrastructureConfig is valid\n- ✅ AWSInfrastructureConfig.Status.Ready is false + +"AWS Infrastructure Controller"-> "AWS Infrastructure Controller": Copy Machine.Spec.Bootstrap.Data to *ec2.RunInstancesInput + +"AWS Infrastructure Controller"-> AWS: RunInstances(*ec2.RunInstancesInput) +"AWS Infrastructure Controller"<<--AWS: Response + +"AWS Infrastructure Controller"-> "AWS Infrastructure Controller":Set AWSInfrastructureConfig.Status.Ready = true + +"AWS Infrastructure Controller"->"API Server": Update AWSInfrastructureConfig +"AWS Infrastructure Controller"<<--"API Server": Response + +hide footbox +@enduml diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure6.png new file mode 100644 index 0000000000000000000000000000000000000000..9d279f5642b2d350b836b614ba22fc5c68d08060 GIT binary patch literal 53666 zcmb@ubySsI*FI{1gd#{QWw2>BN;iTC!lqj~q`Q%nR6qo&O{a8sgCHT@-Q6JFo3l1P z&-cFH8NYMJ`QtEjD4RRhnrp5(uX)XD;V&a4iiP|6y;ZT(v$XnXrf*<(OH|)N-$uty|HD(=k57&4?5wz%n5@inEbQ#f&7gXg z=8s--k%A`-8Y`&S{rmZ?+u$)yae0ct}r&Ig>g1K5)G5+i1n5C-fGgeCn(> z@Sp}Qij$`#K3?$Sr~0>v`O*U{cW&oZW@0IK#_hV!o(c3k7;L3VnR{%3A`~c{D>8+RnblHWX}Teh-vkiQ{Ts=K%D|--|_vVZlsoH>HktZm)89A zMFO*N(rMnP-|daZ8@i2e9)|^f&dB`gFMM^k=J9K_Z)`8vNgh)dW&P+E|FW7eh1SB1 z)>O%q^}y?S6ED-Vsh4zvah`EYJN)I&I)nKWnrU 
zntwUYny%1xpQ1qbQpBBRc3C>m!G)o(?Opqq`S!wfwLDYU;CAVnApIK@Gza>&D>*r7u9a3)`%q=xgQS%SfpQJN-4o!aO(SUDP+sh zs~^1a5C8dnDMeOOCqAhwJ!^o~JTfv=+O~;ZQ_GBJpIgtXjmshD)_G~q%0pHI-8R*w zjO?s|vz%lS)WlcWsBZL=q3lJQ9pw^v@(dlav}=L(SGGnRh|?pb8p%|l~)o)e?|S)^#c|cjmxT8t2!-~^h=*k{N=Ot zmJMvx6_)%6iM2VW)zRs)M)zrfd4nwS{F?g$qxauGTjxfn6EGQ$5y&3myJKu%1l4(& z5&zw%Z+?K^3GU67LQZ;bcp`yms*{XkhW1-DfghzX9Th{`W%e?K6B!kCq~tpW@q(;( znF*ht=Hbj}UCB<`RsJEMzjf==Eiqw11*eIvcr^84rHi(4bs5k5#SCwR{YLm?GbOAQ zQR7rZ6|HVzF0?AfDf$_i8s{kG5jOe`m}qS}Zzn~XOy5USO?x;LfQl-5ef>Fsz1zhS z7fSR2;hu=bPfgw4&Lu6i?-~{tYjjD0B8Y<`D038YPyH71e>jyWUQf}%AN4!`KYZ() zSF^;_h%FM_=e{QjsAu?4D-}$~*D>t*Yvl|*7HlL|IOP7pQXkh)YURqiT z)u@W5EnpngX=S?cz97Hg&9< z7zXQ{o13$2xQbMhOG(g`l*jQL3VHS>H#axxL{3hwV==n9Sy)vL1`EV|fQ99@ztnzB zJAERTwm*=k6f!5$qJFiWk;u)-S;(VS+R_q8#9=b@yRWbB*RRFPvxDq#+U+7T9*2;& z56sb7S;n*VZUtHm(2>pNNCx?nR14|1@w^{H2RsmG8C6wPzP_jsqkXuRd-8iJDJe{X zkjmS%vXCOg*)MDXViJN2c zzq{C~wJ}vy()21fD~r}G#cZ~oFMeFB-nE@Mn3O9eHFYpgts+OcfZzGhsQJsItxAGi zG2op>J8G77Z!>JTnJ3Fk+fcJH=W(T_rNuKcGKdHWu8t=ywOsd5&0!JFnCy~*@87?- z8z?O+;{!{oqciV&pU4l3M2Wn&_GHGzF4E(8YyA9Z(^$#LsZxa{Eilxmthkw-Zf*F} zr%!0uwn&;PM$;Fm8+Rrnz^ zZVSyAO3aTiB&jGVWt_pP=@Q5|d{c>NKBwmLJg+S;Kb|NvRTP*SOFrK$Xpk#S7vpyC z9Itsu%K17>VWPXCIY4nNGvGp_YDKpU()09=wykk$c#_yd{vWG!7g{O50=F5K->99h z;h7@sNq1w<`&B=N_8>DeGvNrt#qKzzh9M1u*sA0@-tyjJ+u8j-!FSPyJ#ji=Cp*)r zVupr>jpB0Qx`uT%j=M8ewx4lXt4xNo0~T-{2!w)ki5jy<v-GDAGgoP|Er|i7jyXbJ{fduYGE3YmJy$Si-b&n?yRRXJL7afwNQU+}C|SK7am9 zXB%lO*U3_UYO{NOy|QKW^hi$xb)+9P_Og*3zgvi=X5%1iLSb;||w7x{%7W@Orgf*3W2>sf*; zAk!aRF*JKr_+}4rb&;)5Y`xf8Y9%ZCDE0@6WPpYzZD71`Wf)bB-{R=msFnvApL1zm z9;2h&Tz>VB&8!Y9E~Td3hU@%=jR7^+&AbYu9W)Bp*HTIH5v7SYyJLeS!s2`L58-J2 z{U}!W7smJLJE9x?sc1EOQgs%6S}D!nUx?C_C>Lm^DTy4B`o5=dKTS?gHwYwTJpG;7 zG2cp-y(RKp2wfr7kv8tON|A0*L_+u+uI~h02wE&mQbs?2Uj)Kk8&H!zlq#M3Y?3%e3_GzlZGKWF|n8Y^JtE;bcA!ZO~;?TJsplEMRg75z%5Ny+oLS`j8llr zEqNNrx773%sz*WD-4I;;$acLAYq98R3R+rP|DgP&YZ7*|u}bR~B!7gj!Som_F_x#3 z@4;mj7J~)SQ!=sTE0nLS@LWD5Iqd3sv;0V0%D!}W6W$;b`}NJo6}n*I;I*spo* zu5`!R9bi!1$2bt>LmJ3!RR9Fi=8AyYH+?9Im?(QD`=FIH2 zme%ayp8faG3U!EOxEqE_vVwS9PT^pZy(~ zVAwm;(fqXT_bMuJoYq1uzk;x2mi<)7d7TR5>P)W@S83l|6k}n{CY>VX`>S-pE(EbT zTOR%o3p#p2kAv;rbd<;=z1Yfsjp3L_=k8takvuhz^Yx5N>ZH*Et=-9rf~>5pq9W_j ze2ogr+5C=+ad*U-;qwP9EG%SXWZX*Eo9L!!p9N6SJH9>B+1i z?TR>CEhWr@8A6rrEL+)v1jB4K_uAIhmYZj6V&dCl7C+r!x+8_XaKT2sz9d0>3ICKIYTfW=k>M5Gb!NqrC}5hAofD7i zOVvtXZnI+f3@KKzXx&(u88Q=F4EmsI=+X6?&kwU~DrL1(7sJ0P1vp?fu*elS7ICv0 z5BS929wSQyQP_W?SnCeSB_w;&193n>@Hgt`6qo$CdNCcczl9HQ35uMUVQ)g(o0gUq z)cj9R(X&@9pZooUl*tPU3bqAPFwxOf+pXieJf%CXM+5i^d58L)I?e5|qEs-l>gs9{ z5s`$1g!T3HKNvSK?H_=K0`>fFy!QX_EdX0caQE*|L+3&jf}~zzMP<0CT;V|4bFniN zD3IUT}C!RF1; z?M(tJ;YLk90e8s9VTbq1stBuD+F%NPIy$;L|GAzZb@xvC;P=Yjvf0J^7eA7HN@}~} zYw|9|`q4=Wx-zejHw&Q1eK-uD`t8-elDG!ZlD1Rqn5zd~(!vzNg~gP|g~I9QR*C zKBbL!v1BCk7QGb7x(zYQ#UQy?L+xyAV35ulgKpct5l29w-#qkQ&Fw`#)VBQ~IDIyj z?a{p)I+q0AyTHW5;#zE2DSCep6_dk@xW#eZ?FvmJgm+n(nSb;PN5N>=N0)o8To0GG zs=FZg#D8cBRR|`o-s3ewa1`9aJzjkdVvr>f9}E5rn=wtSE9mU0(%d0Nr@Y5VU;^1Rf zF+lVHX(sZ1Wai*dK8y6nhxZw$#6@IUJDgb2Rr*+qkxg9`n%La(f<4VtU6;Eg z87tp&_wutRo_Hmt>&$ARN4@Q8PpW9JfuSM&EHlV2afEtsKZ$FD^IS{AZXSNid0*e% zSyu}6JKl;Dw@(wlzhTj9i&tmn=YjHa&@u~rqysLREo0WVIgWb2^W!H&UtxJJEuXHL zLY+6_;vMyF7g7nl(Qr5#l=iChbY`naUM@qTJBoX;?}1PlL^%>HWT80EtqLp;>K9Z? 
zeYGLEzKiIR`}}dB+DM`s@;%|`zwP8aa%n?87yaf}W?Z<~{Vy3V`J|l zA6#kXJXYG9E-6MU81{a)N7YH^StZK7oYK2i2WuhWRrBkwl6|PC;a__t1CaJ4paXl& z?iPa6RgMe$k2iTEL~nAn4G(S=Yy-Z2Ledfm1>#=WA|}otMxosL=NyAXajLhVo{e#YB87-x#dl;K|EBWyxb+pbVtwSH%RzW-{6z9FQ$WE0vhR}YGe2m*T^!(pjwL=)B-bbfNe zZ8fK7B^ES?yZmr&&B*q6tD%5qrraEL`*3@5u`;I!NF$S_o8g^jR#=aRSQ^b1=4^0>1?e_Kpf99lZ^&5 zZB6QIDKCCq^H+MS{LN0@f2wAG)R&VHQM5#Cg4%a)z)(R_+mwJss$1E8~(<* zK3H`e*gx4J6PMK3Ssy1?mwQnuTy@vyGxoQ@*d|G-7}lYl9=hkc=`7= zjg9M^?NcmlFg_NTj*g^?p1hc&r#oNoPfc|>aqr84^B_a=XtID-E$`YRanS|AVC~`l zC9z{f9TcLq1>5F+zRi^O9i*46puXk?@Tt|9&1yKdak z0c-{*I}!NDYeFIU{zT9RE{IFRe41*N3b15DL#!+;D;Dh`vJ*o?Q9|(nIo`0LMcl@R zf0p4n)sBvf-kqWpEu}{N;hh){4iOtU!)ZH?jWQMbouTFC8pf8YDp3z9W_Xx((XSDt zYH~Wgn$_X_+Mk$?(DWA1OV=+>uuBGtOHNQwoMK};j}Gl7%WS8skKBAyP+;;qKQBo) z%(f>vQa2t2Jj(Ycpy_|@%TGJva#sF9424a&$+NxPmXyrg&fb3QnmVy-*|I{x1Cv{@ zT0*cQ{D>I2zt}ut4$=lch0d;woyaYF_E>6Jj_a=+oLG%Ys*QEyJ`WA>D41AkZ`5)v z3dhY+>i!b&B%K}3$@yi*2QUY{s%17Q*Qlt``0p;SY8W(0dkB`dr}tWe+``>2wky^i zbmoqdVNPVS9AzDuY!A?e=YrX8DJAz2Hp293~%nR>Zd)iwdV} zKZbdu34GjNx|pmu!6q4@=d8@IX^eXM5_trUe znqFf-$IIxGb>4&tdRJ3JBr{mH0*TtH-n+ZUmlT)(dGr}eQu}Q@cSI|e?wnTYpsLwx z?;9_g!}Wjo+G4imv2SleakbPqp30(enTq1%FRN5jS1jvxz*o*?wm zWJxfEhp1q98VPHr=vDHNC^2T5n9N%^G%u4PTP{iX?%8DMTAgcH`{D0vUA`)?X_gH6 z(+kzqvoGYUbO-e5D2I6NA>&xLqJo0m#YzF1klx^7HS^Zkf!}rTFY5RNw(Bh~-;DI` zXDVkGcwDzb;d)i_`P*$?8nyBqZdgrgrDPPaQuhYL;KE zt$jhl0sYbWzB(YSbB)5|GHzLZYG7pK`3qwclYx!!l>lAA?|ix$koQr^*Wm^w3hKaR z?!n*LeL6i|YY+3LePurJ+s7v>T&12jG>Gw*{^u5)%`{!VVTk(Hv1Fq}Z;|1o68%1k7a=CJbZolYCsf!XhHx`qIoY$bN>M z9~~$-J?!RNiX`4o?=Y%ls#g&wTA1R3Q?O75#Jtp*3xzxiSE=Q6U1%26*C11uQ-Xe9 zyS3W0ZRJp&A>8)OvY389(S4%JFY0h(*&T7&BX?30@P@Xf(x>4oTp3L$M6CjGaCz1% zok+gen-*w)C*AYz6-};9DP_6%S7;1WVeXl7BM_lPv-lZ5!8}q{|`uI^4ak_{Wvo$^WCYPpKBqN}R-kwbf4W8M9oSxZK zWzx^ryNIj0%BrQawYB-hHf$Mv1+0Ly1F3NF5t; zYAUC6E@XtlqxEEvV9S95&x$r9>c*wNr5Aq;3}hZpIA|URE$S3 zg1O4ax$P+uVI=QdpEOsUjL?l`RkgooQ9?QeqVMGRYgC!ryTg&&Ve(#7ZjXqQwZp|q zjBcxbB}C|Vl4H=&vcO9%wsS8rb1mz|40elX=;5Tirx-0StCiSa_Bq-V$MU&6;2`(i z`u)9ZW`=A%8@?jvMMy|*(w}0bp+R=u+pFMaZ{@$C{(SoO7#r(H&R2W0CwX})kKWdN z63~=$ZcxUD$8mli$d3n61nS$Xm}6jNi}4iw>j&mKKP*WvFV}pHi_qoWu}8aefBeeJ zBjwC)NXWA9&J@H z@V<8h67r^js0G4R%J-`KgiCv+Ru6??urc+u(aE6Xn4BuOmXBGgABfJ18!qYfrd zgQdEGGL*-0XQ~ws(1G14s79|;Ri20=uy>Qu;NkwDV05KG^Lol31ImTfl@$Qk@fl8DkPtfoD8VThtO3(pG(HF>lYT=tq%a&h2+VTj^%worcv-Z z;ryqH%*q1nM1fjGKDbt0%E`&e)s>fn%Xom2ho|Pt7j)>VH#Wa^*b9-b#LORl1J)T` zW@ok`F*0&xeI4UJmE2H2FvIlp^yBzAZuFaD#Ke-DH->XkQ&Ln6baax7ie4cB!DqnvTU)a>mkU8Ab-y^?P89|m0q7dg z_SZOCvNta^`{N-H2(zNY!$ZKIKLjm~;cP_^y_mG>QT|i(ZWsY>=GCiLQy;9X{_ITG zyyt%QOb2k_;Zf}7s=B(*AxgO_jjiF$EiFZ)>40B1n<(`+0DY34#>RO-9RFu-nOz&B z1&IS0eRsxk4_145czEPf^xD2PT<+h86c-nd;e=_{Icro}2^Rhg4-ZH7Z)(+TXkR5r z;n{a&w$I_VLhP+}e`R;yMhCz;8(A*~1qFeUv1u8&k*)W;cYx~!6bc&$hsV(dfn>ew z*+6siQcpb31*z&~hDi&ONl74KC;J?+^M*%D+k3&4^E)hXHOC z_1?YX-Py$0NkFErt*rsR-Ifc~XaG+Ga@`Ga`O(3_0oVexe1=v-hdCDb&u!RC#?qec zc9YkD>g_X&f!xo0yz==2P(JsD8BQg#eING_c{t5}8Q-1yhCet;s{hWtoClMI7@!tc z%mhA1-*9*Pr{~?NK#76DK{i%arF`}F?x4UxmX|M+T%oId$#0sWHdS%^@o{kmy$Rj! 
z8L3q8KjxsA%lIJ`eLP-zX1Xe0n*Ty1D2+<~SJv5mDaK5B^`?rhG3&>#p3&O=;(*48 zC4+ZTsa{-DqhcM$C@eQ{1``nNVimCsN&7PC7dA z%b{T~aj43mo)GQ3z9aevN?amWL4Ti7CH5UW;|~1N^x#`>thJE{M$8W>{~3G@(XUTn zlIec(2XOWGa)woupujer=fuReTdrm_;QjAMe58=N4XCHeB`q~j=(enYiZ=$)ZHGA= zVPM<(Ah)ZVmY6LvMgl8irNQIiin|kHmnH7$ng5aW%RZQ=(!H_Wb7NDO= z#6j`=mybbGZ8sI{KWr2Msh>hr2ljK zzii&k=l?}F&EbA^L4|AI+#^^fU*<7I?@s*y+PLeZgB+EZAhKxwOK1I`2Qvh@u)Q(^ zP~8*ua;MC}27~SPy&UUE626B>5=hHwmcCM$4Ek|Tm+FC%_gC(1_U#@YD&(VIAV;BH zXwnT)79e{IXev)YI3h=`$OrGcrceATKDcqHXCn?ei<+96pcBzE4XD5yE~_wm9=Eu& z{l%c&)VJJA1|A*~qdS6sXYx~9u^sYZJh47DecORXhpZ2oU;orABZHq(MI{QjaG19l z8P$j5VXUmICZcwiJ2mr?u^pz8wk9D3;XI7smK!=*Ok)+-(J^*=@@^xk{*mGt&nN8+ z)EMAMDC%8HR^Pvk1j~_my z%2Bet(zDce$Y*ko)5Y&YdUf|IZ4v~ zfjgdugWKNZK^tLFk>&U#N#}C0Y^o~1J8Ad+$u7l2dJu$BBFA-KOuF%PnPKnGa&t!7 z{$y`^k=ED4B)Zy32Xlhqb~FUJg+hAk9 zd5)lJ=A|H=a-DCo@alOc5zU(A9ihl~he+J&L`dO~K>2s2wYhPs!tY{zO#;qy$`>zQ zu(03($0!q-3+3*HNGD2Fl;*1LM`yeBt1%-nI&Jnx=LwB^UIgOmcY7F*Rp;UEuHDfFN`S|GLMOXzlVAIb3$CuN zTLtRTukSY;^u)@_lD+3wq+(?Jq)V((7EC@Z{t_P$MjxiDXTnt)>Om6tmd)X#M9TX6 zXszEdlMz?_xr&av9_@5N;$U~|yqR2=fS0*!I-0wzuGzB{HUAp?OTs;lXDc6@e7Cmt zydF|h?jM=Qh2e>;EbDVP=DS3@w&1>crO|$^fW`d01#>BsQLX%zBvN7l&4Q>45kBhY zf7S@cB`0T}b>oP83vUVJt2%9~_>7&p)}i(ihnu^jYVz4-q1~nu3T$;%l!SP#urx4m z{Oll^!R^8UMZ>lP^aEw(LT8UQ@mdyeTBK}ke+eL>IN`3;}e0U%&l`)tn_n5E@CwK>0AFI-zZDPC0yhZ7Nn-O)MlG4<**pG7PBg* zis6MZp9Rn_O;zDUrVr4Cz(8XyBs8?xa<(2cRpa8~E-zieoZnZW~1_-MS7{Vb=%7}k88LJ^73fG`i;?}u@`BJz543$OAEHS z%Un`ClLoV5Vew9vT-I{udRJABg7@S)Crh3uXAB2{9s%gMZ=~a1HPa~_gE;Oh2dmf6 z7b_HvCD|$G3Esen-+PIaOGgH+H9OF81!;KMvUnU&`dgrauVq5+ z6Um~Z_atD=6RMSFet3YwnA$;%PB%MR@hVSKg>!qU{gpQwhu+y6Ki1Twx6$~Wd3l@k zL$+7vGgEU)2rIno5<0B)k#2TK+V%G2Bd{+UZ1jbUj7U@~F4dpkSesKaIWu3woRHy< z(sC<*t0L=MT`00i6 z(rM_>kitS}ufVk{K=z`+&3<;aAORbv8Y<9*zSdG@(uXmF4U zZnMjDwiui>Kfle$2y+j9gR!%t5bgTb)Yqpp`CLtfxc+2DZ7DxsA+)rV6_FYT$aWtz zfU`cqOxWQ+wIhrhE}8u*LEQqJ@^2q7hdGwF>Q|oHBjp0A!@Z*5uUh~kKeZCWTD*Y} zKTX0QROb_8tc8UyNqNfIIE0D7&1E7;001yV&L$|ak|wB;aHZPgRpp3lIB^dWmLXI6 zkSa7w{@G)xTS%A&YB5N3-6c?$d!=qOFJvSW5}1|MHKro) z^ygVmd^@gI5j8zS9J`K?n(E9z@yx}pi@$X{$gNe_U7@lVApK4?>5{k|o1MpTX>4zO zm5?}8&ixaak8%B0)9XGyQSX-^x6YXv6(~ID$?*Q3p6Bc*8Ybl4{N;D=WLMOtQ|6nm z@3ZAE1T-qQmt8ObgKR$78d`V06(;RWH}pd+%tPy>r>-$Fz{W1zHb@aFpSz7&v-k`9 ziiCrGWN^@8rgq-r`l9Fjs4kX0qC~NB^cU>VN58Z7_gz-gg=UAL+dsZ5Z0VbN9pO(c zBEeMhvwO%$ehX?mK5+TdbB$6x|@`BXSWMS-C(M+ z90o^R6h%Q`2n0=$if7tLNOYO~RcYxbFE8WKd`VKi>IfmpFUMcVX#~HS_r2cjPo)h< z%-!`6y1mXZfN7zs6FpN$)}B+^mGX8v^H057tbyS*-*{fEc^04Nwl%YOSK@9m;HgLC zt%AJ)j0H1mw2i}cIeppDn(le1ymbaYvKyv<6c-xiB6FGQ)6np%jp779@WW*gH#ero zrF0?HWfXPgr~l4Rx%8GAR?)VrLTi`_(B*Mdx5d_0;K<=Lg+ou6OUf8O%3guYg&N;r zI(qB4?1|8GTiZ7dZ}j_~Dj#!7c)?IO@(Q~O8vJZKGm8U+r00%L*9Y~^X1DQbJzlgz zyhsRcmI1kaHlT1)9`=sONlhrcXe|X~7cy=9dS}plMnxUW&D*q`nSvuqET?Sm2bza# zRno!d2JXxJSwxd!WRzUa4qzNCF$`m>Uu)yH#Uq_kch*s?$A1$Uab$6gkTNj12XJbH zz~RBgt~|OQO@E5-SpwhP8oQ6)13KB3lq|nl`KDCAcs*S%e(p=Fj*R~D9AJv*4Pt~YfP6yD~Q(){^gR)^*c;2mGE;L{s z7c&I^8WS^J-!K>((}tEUq$_%Gcy`v4#5_s-sX`CYY8)M-ct$mJ)ii1*7*yu=POT3k=<&{v%~dz} zasO_#&O7iSduaP_;)}~-vYG`wwQtNv#>j$;q!{-^)o?8W8YBMWz8tD>03w(5|kouV24{(jqIMb|* z8*hvO5e8@88<`07T~It{GI2W&>Or#N?`sG?jRokl#J<%nt<)eW`xU3)8zrWpcNE#PtoPnoMtyo zS8;t^!Lu6h?tyD5?FZ`s_28>WPm~jZt!%>;UT#;eu-7AMUagfvUip^^MF+JpK|7?} z-29EaP*q$}Y@6J}e`txDw}H7i{;GKxPgWV%O7R`N2Pb{wyr$MoRkw5Yhnz z*6KyNs1=vXop7`V#IMg{I2qop|`MG9;`&ECcDBuEvzkP$^ z!B5Z5rV+4kr`@^}6@(Y+eH8={F4xx8RV}NPnb@EHna_|;BuL@Ds#g|y;H~`7qCW=}qVA^tnG)-f%ku+7_le|_=t8?+R9MKWZGccy z_4fe1jq+NDT||`+p>{?~QxkjLBG+nvo2!0cYTXeIEhuC4tZ z2%p;r!a#a-9N_Ojdg`?ts3;bp+xS=5q?rGt3$o|95X2B9l20afP1{ZFpY=JN+uoL$ 
z_0@-7&eGB{mHPa8i>Y|d^*x9y={$Ai;B%rO44G4DIj}UK2?d_5XDt5v19N0Lia??o z0?2EQ_x8SbE}N-MOibLo4Y^2v!_AK-_wkG4%Zf6)(sSrqPRMLu@;nbkUCH6ZKyz(# z+9=%SY~|a^-?T+U1dod&HNM8h!6zgvK8)vaJOv=I2{n@61HpUkjsf@Bnc}ke*fCpb zJP6hT2*=OX`d>h`&`_Y=r3-!5*8Eg`AXOBby}o&N z)&p^Q#^5ni`%#^rpoh^?Uq2!tK@)U(wKd^ZcyI82eS+KuIR@xU^EB%whKCikv>Nm~BVCTS zWRG{pOAM!{rhuNmuFefasQq2*C7UzA%VTh=@I$C1qFzU{(nF!&)ZBMx>Yz|4kof`b zxl|S^@a$P*bMud(8=sTy)y$9gwFZG5-Rl(O2-D*eYt6wx_0AWdgCtWtIkwoAf3@4@ zGw;}hyaa*u{HAXBA)f2hec1WSZ`X855^%Rj&i@9`A$h#)v3=bp_pJ$!jO3dAZM;)F zuo=N!VnOqvCKG8m*K`ZV?SntE{z*M1ByiWAe)z^Ok%ob5I%#>YuR)v1*JVr^`vTfD z7rlPw&oxF4BObkQ9Jb9kT<79hHJ|wFdWGk(4A7eBOpDEMLH^yph2|h72ezk-XSX$W3Jf zM2iq)nE}A}u{FLss9pqUt0Q+ zob1Wsl$9-6-$ah~_j!KS1;gKAyu7>)+Y`Qn6?6+1*?z6|`4e-YCeVMznbm+dA|n@M zvA*BPKcO09{l@hg(YlW&ODJ*_Fw(t5h;Fpnm6n(nuQT&AB=Tb%uf0+tR4Ow4o-_Bp zb1`B&4!eiA^-MGd26A^mKtT5Jk)!YDu0SFtaNr?2CT3%O-F~f42eg7VB2vo>Dtmxp za0G-gP+(3md5UBI;}?E5VtK|ZIanpJQL(NsrxFqJG5W`*o287rA!!u((J5(J(IX>jQf6O!m&Xsvj!PQ8%GcHq5c_u)i2k{ z*b$aO*6a%1FW42;LR?dndyOeidt0%BmXAK9&YCFxX}3Fy6yVfTQFDQ1KTtH@UYqf< zN0@ZXIUFm6udQjgsxt3yIUA}J{uQ|o%h>}w?tR0Mj9pnF$a%R5qsP0l-3Zmt$B zVPI+5;IOT_Y!es3XW6mKi#%K2T;Oq;`znk2H} z=^SGN2bo1doE^P&s3XgTTot8GW`X5F>*!(=d7eM{Oxatz-kuFcOLI97QDGU-^x(jI z-s)h=sIsb=M=C&a!46^fsh!R^nq}B`y<$*3nb~+J-n%iFCfZ5G;#?ye6QixK9=uXa zfaYj&d=>XyS|I*eBP8z4J>%N>ot=DAX4>fnk85!1f_*$&F$V;70k`u*&{cc%=+U!h z&lqMWCY*sh37-g@LE>`QQhDV(M+9jl6n+3&3&3n{hikG53gL%M9p57w{C#{-fCPSX zv_M|=Z>Bal!*2m{1}djGxv}GegPJ2a$k8BE_C}E6;Dm&Qy%FSQXIJV{W_Shq(V)S& zjDUD}co;)Jp8k2pIGZI;z0Q5V8kjNSm=TVXa~BCWK?awIw=U3Kgwa?VDpJCT+_|S; zOMgxyV_aH=f74qp348-v##J^gf$atxuGi$e*zW#1tTr<;WOQV8t%diZ`H2l9w5!&0 zzNCz$((?rhhuO%|4w>@9gVbiVp7Lm0(O0di+LBYgv=W8R4Y`2 z&h{%7iRj!P!bH$^LN%U^%v4&xDFm$U>g3p1A)s-$0nqZeI#M?NYb4O9Ue4czX{c}& z?{my=Y&fqEKqBj1PV&K70NCc*8n4T-M4_x?3@aDNt}uBvQZllt(A}x3Ujx3rzPj}b zn8*_qFnMEL^(Oz!Z^S~?Jzf^HrsipGosi~@TZ(yqyE8x1V*pj!C>Aiqa9r|7iw=&{ z_s2URGCke7Gw}-{kVxyyhXnK3HLi+1pDq4a{-tXQp&?#XAv|TTLWXzM>h5xCL!iZt85A>3p zBhQj~hI<0bELYA~pTEu&&HY~!8-DmVn{4zzz7g)E@@r?(Eb9y8xjrO#~8dVuR-x+FFQzbiNb0#wQPD{|pi8#x7l4j-c=S$d{~-}0_H+|anX za&-lFoJMPq9v@Pk-$bRZu3mY8ra*D~Sf3oo=UMFH~!!9xQ0{Ju=;FUZ%#{lnQ**fnN~& z(ZKq5NKf~$n!C~J%Ib9oCA5e=t3YNF>zSpkx7yh+Np`y1FE48?S3r30e@LBFxKU+G!Mp~L$eso&l z!OUBm)6ToJqJCi_BZ_9a^OmZ5LbX{gyvH9qTiXhYY4J1W=v;7A>@zlnK{N&i1{D>RZr^L79SaMK zEo$EOc2H@7pw`yhJT^A=d)Y<^tudu}L~*PcoTSVQ&Ckz2KR*Xuqp1=@Y>q%L3vKPE z5n14@Y>}Sm&J_~N$FdrSE^DQroD8K(s0*0}k#Kz5u+-5hvYHpNQO;5?_P9m>P$D+; zzK>BqFHifKS3}#Q1CoD&VQ%s4iE6B-hu7|+LX}h^|MuM6ErHj|M95YHAc%)%F}wlf zQq0fH%tVTQDy@HwjyiUP(^{b`y!x*mb;uKOaq(iX{Dp<+SXj$`YeN6KY5o_L-5|=F zZVM`WD4tWb>_+~>?E4qj6>{5E05(<7Uij~F0&Mp$4g?_af3V}f#UmhA8vkNJa8H^1 zU%uOmhY)#Twmi=Nej~EvRMKR2+S}_QSriqxHsZc1LH~J?oW8n^<%-X*Zs&B8k4r5+ z^X}ksfK=JML2n&U*+Ai4f9kXX!0q5>O;}FFW>pd0%i`E3m;t z{+cZds)R$~R^2-{Ekf>+S6g5F=R;+oM%#A3%OfjL_Io8gX@w9Yn?YoYn%C+A7fQmv z`y=Zd9y?qG$M>?6aU>zfZ<6>=g!j9WimGudmaI8&xPrdML(&=LPyeb5Or|bim6Oyp zL45o0w@tFZc7OSdxzKEnSyMr9PDJO0<{*-q`ggD@=7=;XatNSfdou+6T^C;Kd3PCBj@B!4C}2E zn8NqZpWDDmITKysTzz#pwO9B5j$^nd68pG+WZ)~<#Z`VT2ygfP0!~GXh52;n8Y$Rz?=7+q_-wOsT893S(79SsU0r_WD zx{pnMZGlWq@!$kB`wgP6Cb-S-V}DrxY?^LlY_j5p(?TUrb!Zqhkg6Xh)5|{INe#lW zSX3w>;o7=M#&f6wzO;fGH2Duu8$G6=@HkkJObGxgDJ#nd(Z7 zb1Q*21Q(PH!C@KVZBF8p!5hYw1`LD4*0!#TTMIIg?f#Nf_b$ex*!zYUqH8Y^KDWu# zB{v@vldPeYJI*_;0lS%RGiH&}nFFNVu}njh_UQ?Ly+HhM=Qmb_;(zc`){_+f!)Z zEFR!a)>AZVodi#H!LevvT_K5d5tkNB**Y$n<$v7Iq<7cHrOf8iY-=)n3}@dVam1tK z_rXy~uH?q7n#TT4*?YI@L!?a)&t7B>EYUM=t!%WN_;hV%1FZXc&|oil|*vPXh%Hw|Va zz7S95mCZ>$^i655{IGXRno0R0hn|XG6_0FrR?JyPAG$u<;9+3_jg@%!?$6Oto%_`} z01tlH6y#)N-TSl%jT#5Z!V3AH2mdFiJI@Hj9@EnrTrX|cL0UZC7^XjAI=i^IZ_{p? 
z?OlvVDK7u+OBF2nwcri>Evp8{83*1z3IJMVjXLLueIPg-GA+=4md$~d9rN$*zYSLp zyw75(eQLJ-z+&$AQRPc^Q=5}-^qZByFMi58&hK?#R)|Sh-@fPfWxBpqmK}Guc$| z4O#r1?>`hRTa}Nt(4q%h9h@ZA-e!KR{5Y~@)1bVxD5>2b=m}w7?7FVLgNf{yee?)h zn2_kl(j|)ubbhQyof(ITBnXX_Rn%0s7`DjKu%dJDTQ`a?@rv|-r^F6(SQWEQpY$G3 zE$^Sb^*4`y9gqypmhJ;q%|-+M{PC=ds;X+*OioDq2U4hv7gdXH0}8xPb?O&7-H+T8 zScg1t^KjKC@EbSpuurPGLA9)6xis$j|F;>~SNcppZTH6}s&x|@nHX6Jb95=9rrmWe z4WQVVZ0(WDEu9qzuy{Hs9}GL~Y0%NxYapPCm-iB0xR^uho%eOKNR0iP za4yY4FaX8wqlkFL?^Ei}NtQ1lm~Zz+uT)a=RD2w)t>^RS2wEK&G^$H;Pw2pzcf}pJ zf;JhxpBoKB`uP>^D@U=fl*_%<9}m(aB8Gvql`;T`VCe=yF=38DnBr^;=x`+Gt-bNp zWg2Inoue{BVCsY3JFQ5m2gfWqmWBGR0^;%pZqb?pg*#KB3Hc7jI-ZWoqWoF{o6hQE zPP^r$7H3sHQ7_!~e*!><5qi(gYI)`1_SEI&fHhb+T)0Z?kYTj2EK%e)Jo=9R0mNC( zAS_$k!XNzEw<*imKU;+12P_m!YQBNCLd3+xfLs=wlcs?}`E6Ia!0+!+ad&WFf7{pG z+^kk%K>&Uq#o?gzl%>u}SM;wSdvNqTn#VE!iMo)l)glb&OrH^t(ic7iT;Y(&tFJU= ztk7pGP=YESR;#(6!2$@CluQUL(h2Oi8iyc(|BJb|jH+_&!baT+N{Au|C?z7@NViBR zF1i~D>F!355D`#n(MY!lNS7d;vgi_N>F)aOXMy|OZ=CUsGsgLG{_HU};Ip1}$Goq3 z&1+sads^Nq0P?%@Pa>HwDIl!D`ubQmvIaHr{!byd27xBjzN7>s@1l$G2i7kp&liSR z4FU{rVVbX#7f^>UE^$Z4`5u>gl^eb|tT3?J!6u+*8;wd(YkQ4jxi%R1SA@)%=>=Y8 zU;|lyV6vpSoe@DmB~NX=XG1uD4mB7230;Pj3Q^y6MHfBh`jms^kO5sSrt`uv$d-sg>^inRt<;25(g3Ibpm*7&G39g~4%Ai>;tD>T^(VCOR zO97lS9_mSh;W)dkiZYST3tyS6s{>R>-VF=Jz6UEPP|7$^gV$kbk9 z!ex>^FGHvW;KmCuV%l@8tqV|14%Q`DRb1d!&;M5;t^O285yG5*5a(S;3h@UT6NUgl z^8|aR@)DpT`-)4X{`~F#9i86Wr!!?1Omxvd`MvN!>ITHYW>H~WzF8aNyBpgLp`~jH zma`uTaQ%-Vw3t(W`JVlnK)bowiTUQ|i2fEnV};hx>aH+^>Mn~+Op6pYZ)WTAT{BIp z(oGy+46Ic5eOWg1gUW?}z70ZR<+?WaAsb1vns3~>C?YX=yFI*wtYCKj zF+3MPM^e4lK_UaHY*ZI}5Xg9Iuu%p$FQE6o{~JiVdx?K9#}N|gUKZeT5-v-$*68Dy z!mt?4F7@H4F7TRp2s zTCb^~pNc@~3yZ45eAu!^Gf)I(Ng^N5a&N`K2K|(peI#RH@1H8%tG7{5-c9K2&kAAG5Lzyh!aTnAOhOV@v103|lNLEELXz9C0q$sSOmQ zly*kjEUlty%zTm*ZJs8#z?3qf|9AH%cjv3%O%x$YJS>Km_B20lDX>fHkEGU$^Ki^6 zoPpscCPs!0CQw>7rk@|^=TuZYWCR+~{`dDS?bFR(etHD&ING%vt@FxyH`UkrJV~c^ zRN336vO(mm!tSBBcXvj{V*!EHz14hqg=_uvnf$dtn=09pD77>+^eDBlEnJ`Op`zId z(1yMNY87T}Vilf*v9P%U}s3i_Sd)Pka}ZMmTREW9lfJB0Md%Da&!hjt5N?$&|G!fWqbUP z$8Mn#64FK(BN}HrV+t97Y*28#3-8&p3RNizLi(w_XQ3IgLLXJNlD+9HpX#yJ$I`Fc z1U`*$i)DvDkNKl=20Nfi^RcZ#(2pOSM4Z)KRJ07NG>4llAN6u~F66jgO_@Jy+|Ikm z|LB7G0ugM6<%h?7l5vAE2rf%sxhLrr=1szHG($-}7TUtIaWoNib$tW5xWCitUV1+p z`pnrbmxMSy;+d$v-%X`a$}VJ07Tr$q8y(Y(%TUrRN@iCAhWW|aeRZ_4H&r9B_gWnd zYblxD#?IF&cbAjX#=unyQ}RJo*0RYe0*T#~;oi#rfeON$a`eC^6f>ssmL`llD<-CsQf)X)h3e09YAX@HxC2B*9{Izi|`JEF|$ z*$+HUKusI77SItLzN=CZ9ssK(O%ia?JWgxtL@yO%$>6d^vRT)9NtNE?fYpDGzx;kx zf;eBo1Rhztq|5^{Io!f#x~c@+Cg&7ic@^e?B)Uqiym=ot%{=V-M9_T=`i@b-mcxKb zp%39S{Pw>(`xV8{!^7Q%NABMzTtc`U<`!I+i04A5Vn0(6Wiub#oJk$~TFAfn_y4uoz%8l>L;$UKm6dqI7 zoeTQ8qEQ+ztc|dJfx*Kbk^#*a4#yp{yia4a3y5@^lgZKNdD=NbO7?Csd*!MReFUT| zm`46Krho*%wzgZ_+jqjrvQogl`D-6GT2ibRxL)@_IN4@@%^L?3kk{jLc@3+$MauaM zXg0&)5g#9Ed{&*Bs4VeQHF0r~!y^2h-9mZtDwdt~aHUvqe@zS@udCckNM4r-mHJ)k zxK+@w@wLJf8p2*NHdx;)b}smI@YX=_1#9c`BzXNvx6hk6X6+TV!DGst;}?;~lp+Bb z@0ss&=JCMXTu0N>c8qST{XP#N5eEAJ0ew3ZgjN;jM@M`&>9qDGI?I$ijyvNsdVXko z>?IWjfGCLLv={}B6Uib0pp6R-hX<&FNE*i5NLk~`Ln=G4`*gPe&^WqWz?G68(eCi!$3Tje_7?hH@(=msPb774a-7gIVuT8gCqN% zfhJn~!`-{H4P7LU)9$1G0w=&-#w-VhaGG~Qi7NlCVqQb4-Ll-zjIV(_9?-BTJdKcU z`Z*q-i|tJ}?gpUg#d>&Se%4n`<1yPa+q#w*fhtU{+QzXlL8^>g!rGLl<(rhC3&f`* zUg*K-m*l?&ypIKLNokj3w`0Ba1R^-Fks`D!5IxNq^`H|1IaWLQ8Gl{V6gzZLDTZmsRmLTqM5?jWusWte zJDDLZ^WaDF8;*T)TrI79BCZA+DV3;*3}VYRtJ?d7*uNOy9R`mRnuCL>rOlmyX7;dx z;*ma?Rx6@(W#t9wHsGqw7oQz|%?I-R+r((|{!Fpazk@77m_G73SvGpA78(W%frP>o zh^+4hW^diiM-4)%*PEKyvci~{43CaRXI<7*UFJvf z<_wIC9z{LDh5)7az|;TIRtG>JJp*l>830Ao!%g*t2T-JVAIKOy(3(!9Gmd&tez)2 zx(dg}Ww)R^-y54^ciDK!l1>{iRte(RXPS~WJUoUg 
z4TYxAiHIXD=fbc^7(Gq2p@kOUVrzGj1Olc@$(8>+9fjtET2oy;7VR^X_$lRWoQ0{k zQu<6W@4eyR0Jb7g=l>Q>cU85KqmR#E(S{=fgrn&&0lY16h}-*(YEFYL7|@!8 zqImJ`e}z4^h9=mkXRRTad2_EcG;ooQRu5?0*g~fTtAP##ucqs$6Rt0b^cRDt1bHHrGYN)Egt!AjFO79a_8%KYtwoe1qBm8>-<^k+Qra9 z+8Hxi&?*S6eMJ%E;(o~jeI3&{C_TJ)^UwasbiVg@l6|>TkhOQ{4?d03B2%%8r)c@^ zLq!ARQ@}IAFh;#t?&bLd*C4P$N`hhBy%NFF7WO`=eVXb6bR3aqFPAg|uceQ}Ug%%f zQ7U(%*MjEUPE)?BrN*`$koK*u&m|;AR0q^KJkFZa@K{Rn8@2 zbmKKjr?A2vJ*qtYL}I@? znpn^Y;fvSTFqMnpWHlFbOZD;zK>G@wK=%F0S!9s_SxuisE9-N2>zHQR#n zR)oFPaO{(hbQJ?fS=ukZHO7B-~Ll82T1e*XHqS{$wkI<*6G z6fd=~3S%#bX%>HOid@aSwN`9vPUfyOwU+PR`xh*Lk)+xl4JZhph*q8!WsRQD`&%za z+5*(mN0f4uDp!FylddTkFvtA-QD>fgCgl44ZRO*Jl3x{p&nLRe%HV6*FYSM;vy~J4p(d_DJjdU8^u^DS zL?A?*pa|>zTN-^SrG=_>(7)+dV(R_z;|jDQz_fxDFc1sNiB~~#4-l~tHuQMSVJh8^ zR9`@HRkFmP1WW2>+#x3soKxBruiP#ZuF!Al=e*1)rV(Sr{j5Cm}{ z1>a3@uz#?hZ`^IKaKxsriLs)R!rm+^H`L6fys&?T7Cl)@U0~L(j^?sTOh`xw43q#41TB=H{EMuGyl>}m+snIJ@Ro7ozxB3P zpjs%B=}>fHVl85i-QFfHtoSSFTD(BQLPC7m&Jp6)8$M@0mu%ZQU*qN%B749(-~&8_ zm}h!+Ruq_Wivkj}#4IT*gE%v2p@D_qF`oO;@7pQPEk65Kl_!~;vFfpOwt%@0 z85|r0o}W8)Sy^voaG;3~4Gq;~GJ0idW>)MTBMDqa*j_Q+iC2`Sk;M75VfHya9rf}X zASLrDgS6%0IuS|t;HvB;(O#ytTk5f~u?Wql-D;)gF(Dz{$Lnm{000>>1d5QcCVwJE_l(J19!aIcoCWqUjj|I9B%P^5Le_KtQ za`qo1eKCfV<5JOwr=6wV7HXcS{=2jav9Lz8@9ph$e{>e-M(h0Fa<~W2Wg@RV=eYsA z4&Ai0_Pn3JzkJ&0 zL0F|t8?Yq>t+}^Jcons@YCw}GkYz!TXUz|UD1@x}NDL*tiXnjt1LDMFuvYEt`$?em z&dnW3yo{d(&PMC4^OG};%x}vaiaA&wf3dO>Iy&uj=g%wjD;(bEMrdxeQ6~ZOjeCTI zyu7^l1O(d$Krtbez^jz42wXc-Qc^N!!+_C*>^v}K0FD+wJ_2ZYh=aRW1-n)qbkKxt39o0_aw-`Cl$6p@Yh`r~aF`3h%A$0ds9m*-? zZ{~}=+yA|M=oS2@X5mrEKJU)x%X_kYfe7+X+7S795j1fW?BdsVd)9C#J}=Kr=&%4= zNDv31sM(_tY;_jaGC@t^R#4`dTmxG9!MTeR42b3&&#oRW`fMZF(~Cd<6_#QC=PzWp z=Wx0X0ow@u3ZgE*D{jXkrL)jJE?*as6|oY%h_IKl3~2-YLvn$feIH1NeDSd#qGM;= z!T(*Zj4TsOFoqYJi$6+%3m_|t`wJ&Q$RLNs;=zFD=$gsJKS92DnrCwj&c`p`s<&Kf zsCI4Frf>fJgn`u9UH5``SSx;_wlNKx!4y|P^@x+Hj$dj+BcjnO`aGpL6u#3j9G|?S zOh|a2j`(h12aXHXso}nv$pk{w5{I9+eB$Sa>i&lm1(=wI3Jalm%n?5;qxAt+JE{)HqY+nb_`^H zN=ixzybg4flzw9P`1sE2ub8%JiwX10Gww*g z1>%@gYuPVf=3r5qnA1kK2v+GpF8?#o_Wws_w4a(ih{DS9ohy00%Bq6Uvo}8j0 z#PD#mA1K1S3km{#5BWSnugt)$OQT;pDIrk?S< zYa(jDsq|4yMF!rq;2E=wxB4oFrNq)qVDC*Ke|XaZK`{&J&0x~Hn<=kf@~58%&72Jb zA(M=zW_)C1WL%tvn%ey!3R5}8z{Et-KLZVr%pJQx&Kd1C!_~M~mio0DJ5Mpj%11Y9 zait!G+%ML>NjV=QdFa_ba~8J#bqOfGa58|l%5t)HqPsi9!qwSXQBje-7KX8k-1@?@ z3H^BsZc1}FMIo#tHwdsYo}3?^PKb;La$+#wo%ae~ADe_zZUpT=>i^VA$W(Y54&(nW zFmiy?!y+RXlD5#8=oP}eIE5ccMA=0N9A!Xw%KQA8im9-y%;+QN;s}2Kp5OjAb`vl> zW`6kaugiTvhGvSd8MY@ETa+5+5sF(!_H(dn@LTJ*+M3hf`_GC|9w~=@u_<9?f2`};ZIdOOJN{i3d+g}C@%XVTB!!(PB(0{r}-lc1nr9f)L9XMw^R z%$0zE0QhX+v|egCfx%?bzjvGitaPAg@hgnr23K?)iYEDAlo|{^b(rsK*g1eZ?B6A; z{$eHOBzps6d9`Jh#~44TkfB6&8))pIEp9F0F*28Ze-gFX(*P%J!~@#dPh{<^u9sq1Yi1N-tn52~q+|atAgy}=mJiI{-X3W_3{sa*MwAyc0W@-;+~bnC zcQiHhM!^R!1j9qb0~Z$VcxScQocK`<<{kgW0tL@#gwnImvNS+M(+kuc!1rmxq&!U* zaTB>GAQ{}&O8&2x-VMTcIGN1b_Z9HZtu{g2%udfvoJs^mB$KMl7_~UcMeamihn$?8 zB>-9;u)Mqib#+5;(Eq+Xt$RlMG~n=AEk5nua-7P~6~2;!202|llc%%<30jSxd|`HQ z>1%vqGC@(Ep{f3a?f{u30OSW`I&Ezxz&&b(nkj0&{>=?gH@9(gjOv&J&=vS_&Bxru z!=sIS0W^=u!=UMu(1);yWimG_W-L}_Oh9U7sJy~v?co~RUU#&Yp65b+f(YAFvsGr+ zV$QhD?e~>7fi*R!K(PT{TN*e)hJ}$wmxDesa`I`=ECc+hV8Ecat!=7JPhY<$fiK_; ztZ-;Tu{5LWMEe2q07Fr@gh@CMf~~NbA(8jn) z+S1pz4KA}_04^n{z;G;a1vEH1hhDodsMSP7AH&R1j$qu5_dr;iO#8jGC^&Q$Cg8RQ zJg~sk6u<}?#4U6x(3Pdk7zSdN%-+V72=u*?nXkm7`LNF-mEwla-q{2O=XQ1K7@Dr7 z=jPH!>1KK-Xvb{6V~4jI6t%a%zy~c+)r2nC+TaciWYo$DCfMBA0EO=lt;Enqg=&$S z2-TLF?h}U$zXa5CQlqoLLona*u%n*!b<$~RAv|rG&U32CvO4Ag4n%cORS>KS-etk1+EDw_+{_Y{~X~f zI^5?5Ck{A=u-@NX85;}r^kRQ-+ss^-VUIK5LwZoiI08`w`Utfu?R(?v41m2nID%KI 
z;=;mkNOjcJm!_tKH^FkNa$ME>2n6gAkxTpBz%czkaUhfznkJe?XcnCW#jI1@*093e z+AlIueQ*OBHDr#|FLR@9Oe|*oXfMMysuWFiQ&(_s6#`h7II&+?kM!hchukur0O#-H z0%swDWu(d#+WsJNa;-%=eaI_Y+_VhH!eXqHLtjP(V$I9)GCD`lp*Bpa(t>3}$BIP$ z%_A1oH(b#C`^4tQHl0v<9e;kQC90XNd!=r@pbWen!Peky)Zfg)Ug$@EKA)vvvTh-9%{pD0SGNy$i(bMMoTE5VWV(}bLcVEAN;{-7j0Gl3)>++T~yzH;efFB z$v?x8^};~nqTt@k{;z8H|3lv_K-ibUi3>RWTr&D^kO|&{gX0BL@1aNrM&F*?YUXMp z^i?lj5fIwJFx8jZht=BQt~^dQ*O5QJNdI>`DGe}XnAs!l7j67AW=#uqdi9R9z>Z#- zcgwocV+G&DyY;T|RUm)3XYp~#S4$IeE_6~7P3PnfP0T!X6cmh&+a zL~!%)v>6nEJ7a*+99!n;aX}N9us*mTfi%&?HvvmQL7qA&Furn0b z#XBi+X0&se*ma(~q6z|;SHjVZB7UNsK}dseeM1zpS~K(8q#CbM zb*k{XxXy;~x`grebUaKA9*xa|lk@p)=NF(k(q%pige84_eSF}52g##6j=ttl1Be0I z96B3-WXjss_K!9K&9uREa2x*RER3t5mgh8c(cO;^&cQNqm#1p{~I6(yy#5Iee?7sTk6H$(Ejl9^xDUEN(pYj^gEoKJ>J`Yg?=)9 zipD`x}Iss2Do;j~IkFk?6?e*kix|8L2)ITuO2H zBFqcX&X`#C5dAlizq$N*k8+JW;$yPJtNib+G*99#F(ppk*bSy^qbh!mnW}HYp^`rA zRWiu$=mQeq=NC$u|3Zc}VXVB#b-Sd7bGn|HbyCcl+gwZ^Pty&r=vp+KFAneP3?>sf zoO%V5Lb=}s#=JtzZuD>gu_iu$Q&AA+q zf&)Mpg0Rq=uzx*-G3i540Y#X+;UZDJ0EPYif5SrX+{)+Pb5%%&KJuLh1omWRM^+w)N5BK9M2McH3dmcvRxY{ z>HZ6cIxnFI&~Lu=$-B0n&ffaq4|Mw@eKffE?D84+D71)3;g6D2MQuA zI54|pUx;&ND-g^xu>)e7hH@2m>T&z>v>5hb=Db*}_9TQ~ww>HxE?6I6uc>4o;mtFf zywxo7^vFv{$o@3}Y`>Rb;IKI-Cc2i27A_~oH_P&99 z3F~9WwS&!s)fGt#3n|(saRkhUC8n?37v=FM6ZFi@C27PfSx!{C_>;C`lZl((Z&EVL z`CC=r$x(i0-Bvt}L)*7~o@37jErOThxe>tm{UOJI(dwwm@cjGx!Z%+4DVXo@cS;W5 z&!4x>PpAVq15IcoSx-vW=3bp1E{ItEjDIfq;o}sSsp2#I^P^>fr%#DPiSNT0`+U(8 zTr3LYGH2ebHq$GIHu=FkSy?aUE;xWGPoy z@!nzxcH!e7@3*5FH69(oL^D|CMqoYbuy#CUOD!7nMijcq=;r+$+Q4Yn*zN)k| zl{(MQD_U*u@M@YYmsJW3`yO#%mRZm*$b8PJNZ<)?Yv8h+1y|`Am$Lex^YiE3B9)x$ z+1#;aaJDxctFE#5T`|(s^q4x6eqHdRv9DrB9woRJAevc=&}CZGHGkbEOHA^)@iQtd zzQQdu(&t1f#yX0tFm3HKBdXb-9>?;k(cQLjoKbhVtzUjkO+B2b3Rks?DK17G6N9F> zo>|``bGtz-fm*I$t%WWH#)9mwP1XkNmv%T3U1GM?+-jpgA@ACdufT*Etb$Lb9q0 z%I&mZ`mfA6dz309$9b-{V4+rbES>J_(`@{7V~tHUMZx^;h1XHsJ#=&Ko6dDP|6gIQ zQ(?d}__IzO@C#gNdEk|ol?9CWuE#Q=Y5JUn)kP;Ikzl%ScUgmw!KiWF^ze#+l{UDQ zkaRt3VRJ|GxSOvRYy~Byx$Ik1pGu#7sY|o8cwbeOQq8{_xE2U!Hj0ZWd8x}u=M$$I zGY}^CUA$+KK~iSc^i`^(>++zt-UC(>w$G(N&2upk{gsiLc~+@;=GITX!OXD{YZC>g z3im9IU`vi&pX$;cTDJ+EZH7854|;6W-vBogHhEvW0V+A~GmtGwXLnQQNv zT1{^C$>1i|CZ}9C?8|U=Wf5?-H+QeT5t_u7Y3PnFmJ`gJtwE%qc=yhoscNtD-|g+= zHEz$aNXMnwq(Vt=Ef2MT?md@hE9{E{wHS& z$9wrvOu=b7B@AG{F!b>1uthw4po4=q2O?y_J~x<;S5|hOCz~xG%}-tg2kgE5BZ9GF z>I#t?X0BPMyF(F1k8y!O=h`Cr&mXcqy@iH5S3;?|qxmc10(dvh`L9xARQ3ib~8SK5z%+bq}@5KFUH6iY|2=y{@B$tbHQ`( zv~_Dvj{mG?U(eX--Tr3b>B;s(nI1~LxjXzHeCgTk@adNnY}@iRa9e_x4a_vQS8t*! 
z>I~;=j%dipl;xR!iSnDOdpD=HVCHXoPhd3Q^9>@R@R8!GaJRoOb~j48{jnM;&rcSF zgpPiv7StZ)q@-MxBJi~-IFP<~-E3`dyC)S*X)1uAhmwfPQqd#Q4z!`Od&^$CI}fQo zUNSdON&a@9mo#*7adj?NP*x`3{zFgQRJKx9yql~TJ2D)dmS#UI5)DZXpko(V!PdNr zbmOgH%EEk8^D9V}VT{Mlb;HG;F#Cb)z9pNLHd;9dPD3Dxz&hlc@^_&}DR_N7mIAI> zm)MsRxoK4wZ1S4=shbcnHw$oL5w&x<0kDWSEb$RMPv-$Wrj@ zG6(IEE7jFkR8%So3%xa&f&y*E3LkAAPK?KZTTjd_q;PRYl@ty5>kPLJ4}*sq#>cbt zpT$XpUp*i$kSdwX9c&Q#ZsrD^5La(uVb3ShZ2znotKQcJq!U;V%x_`u-jV;NnwvFv z=)Aqu-)6ZzFGSro{FOFQkB|&5DpmmoAkC<+y<77#cd7PE^i?cT;16n>x&XJ?Zf}nTaHLeQTjRn3>@{Sy7(I{Li19 zUzz8~K00^B)&>TyLLG-pE_<3!SATaWh#dGh8f-5Fe4+GH%H&n5afys--r`UnxjRF| z*|-@unkl1u%uVn!bQ>l~<57R!|ufsJJh{|~I`~8pFS(NqwTij}CwEe^OD`5G76=l>$ z`AFRUxzw&&)!ruC_}>STsvxfsJ-`RD16-w{i z>86%3{2=#ozVqdxIVvXeppNaleuEtq2JBz}IP1F348V{H$QC1Dl z#o?b97#wKB=U^JY1C0|q{D;jUkw9R)lG7a+l`~mAhN)c<1u;!@tuj+@k)s{~`swrTsQNcv;ktf6nQC-S@zEd3o=@H^dL-1)kNNyGiv0mz))3XJir&<}JdW zW%AQ5Sk;jvbIOWaADUUCgUuiO21dB$)BsRHe*_|b?`O?PAnxg+_gL$5uc6P$h=dwS z%9>%btt=Wi9=&VO1Z#nIS4My{zU7#*^F(cBzS{bfuEWMlI?N=2I0+eVpJ$oJRb^#Z z0%Vots`In!KJ&FXEjU3cxm%$^ zwJ$;|b~=}q=+1}gt_N0#IZzZqyTKv9->UNqjP+V%hv^oH<)DKK}zjSASB;!|g0+=HOYaC1wQ!M=SLBJg1J%Y}W>Q$)fgX(juGf)CLthW~Q>l&&sZD!kzJEZDx7U%7g9v^X zPfS#lTY*hhEe70kkX8R?%p_y6_q*=t(QvU5+d4QOR#eqZb7TNqcaF`XA>c$wHj*JY zJJV)>CO2}s!4JJZ>iD?KFis)Eenubl_o5vD6^qVhX1D`)TI}~Tsou4VbaAei$-+|S zDGL?Il+Ehj$>IWYl3Wlgl}|~ZF&LF>y`{wEkGx#WtT*1J-@d5`(g*ib*IQaQPmd~0 zmDM^%1GOmZ5r;Ok-Y@e5-BMD=2HP>f^2QN|pH4STX=@KU{K6Rx+Dj*<*rGN#To5`2 zv^dmODCovCg2XDzuV2FtoZkY~fW_E##I^L~g6VCnoA21eFi4`#DM_>_dl zMYp}8m_rdeiBToERnLDv(QiIL2rvEsmtUg~#u^tW(a(TfHJ#atu^>3}u(^EjoCBTD znFVhp_DUX{0s-P-sEL61?+CCtF~P?z6GV{TzgET;Olrb-WR zsQv~2Eb_6kA1| zK~?!+voTw^00$-K8a2CuD_6i}F`~kXQRbFEs;ggvb7@@D3nPg?Mi~1^T@lHRi#!{_CWgV}4s34q zf<`f$VR>W^aAzEs`nl`BqvJ~uG2I!r`THyf*LV|huFS2hF!{WaW#i?@6VIE?DfS1K z%437OSaihwC$r>8P=hF8u3dA-osT_@~io^R;+!*5E;%se6G%_B>d6sL##Hoj|mvQdj!9l zQiL%b#QO8+DaH;Qkv`+Nj}(zEP*{VTM6-66>=bAWcFNMy;DE9T3%Dor2qGsSV(97Z`0TUP7(4s=miAUPot&By1=?bnL*L?wzyJNMN<&e~dddx* z$t9&}jZ*mo^<&(boAu---zk0g!ornEvNAL2m9GydWGLH`o}cbN0=NUXjKd^9(W+7u z8vI$*S#^y)Jt3&Ihlb_(`HNAE*C=s{Z{ZAx{Zy)wX$7nS=*-~rx$~$RpAlnuxL2H^ ztOH)$)v5mWG^ccZ5<1Xw~X?jbX?|2DoN2CeTj1DkDVcwb>-=b{v)D8B3M`A?lpmuiH~pW`m-| z8)1}FfQVWi+;xlK1_vfSAmR95{f~6iSf?u$4J@s~=QY1l_VhOO=v#A3`b;s~G4Uk^ z8sAZuKRWbne{!gB^0Out2#_{{aI5p zVgCGU4n2vppdaqqRqd2#8|t`#`pZ1T*WaH1D}irpJ5cf_)^65Rnzf4yp&zY(<_S0< z%ekE@G^xVxbMtAWWghp=RL#qw5Y)m|)3mMS+zOow@n#1`O-fMbP=nG;^_pH-==@`$ir6f+*%?QFAD<%Re6Ujc@R`L8^=nq(nLPwl8~4+7i~C~(7mNZ%8~n5 znrrlGhX{m%Z;WaoBcTH>loUew9WoVzi0FUGoUemxT|jt5n*3gb(JnV+;4Cj39@3$% zsBGVK%wRpjfZc5PMk%{}w>*v3PD1v>*7w`TGu>xtwXP{t=RBLK#xbxul1T zvgYmoh_ZcAK%uk2^R!h@f1=+oF#R&Z{z$FYx=>mdTRUy++f4ed-_D`8<;ufkkS<_M;XKT?on zW@5sCsZ$-Do|y^m{6r}|{NI5Xkh0akGIHx}(JPt;2km=PzRa}r1=5`zPfmf93=1V1 zSvp109bd+r#<>~<3iw%{m-)Or(MoXr{atg4qjN|9JyH+#9YT@%9q>GYA~j*(MWjAE zO9kS20&L~wB_OK>f&fh_wvBzO(+=j;wcNayEn&Tr7b#dNN+(DM`+e!}KE}jM1uLgp zgwK)U=wP^rM^)wXN`6eJp|W1b`a}(%$>`T7&z{-AV2_^OD9md#O>;%Fbru$O(i5!E zvv8ITa=Jbq~6nyEr z&*pP4!hL@NtxyY)1jVinlgFV^9aETBCo9<6e%8)a*l`qhgYy)efwS{R3J0@oH%(<1 zRZ3?*2`B1!aIDOC1>$Jp5G^ZfCyHb3<*C(sd%x&XPHJFfwak(`xR;;eFT`%4a{%r4AFS_gZKc=w5(JDiT zHnHmIC*!%8eGf&!K-i*d87e-izvMGQNfs%|(`I%F3N5OX8rc<+&4EA)DBigB&n(G@WCy9GeNsqF^z<+qX+MdY=3=A!eoHi1%6sq zY^mqwjh(FF3*4pP;nOfqyfuKE+(xO0IzV`#{ADt6)J)k!c*i|`m|6$&$*gp6yINYQ z#qpjnjuXBQdP3bhV}HnEqD@e+6hIjgUe&&g%uGB1>#8dHy%mW;cbA>z0$PyE|C(SN zt;UiIrg%3UH|Ztxn4wbw7~5}Qn-m@Zj+TRSu&cd&ctEKePSoG2!$Dt|iu>H;>6<;x z6j!vZFFE^w-vtsIIRRc&(YS~yAidN>J(calH>;3~qNp6GZZi~3dYAhvx zUS8O%O@Dj4Sl=7;i%Cn(TpnOyTU+e)Yq*cCtcacx4|)sf`3`})2npZl+d0QW+tL`d 
zARyr}C3HRTTuAsGL{OZQvjvcb{BdOTOF+M#jFEG;iZIZdGFy#1^Z%LwGjvAJ(Gz=0 zX{r{un(K&ml|$)Z zN`=3by_Ar>9ADO7{@;h4yLIkT>UJzgFGIC~1h;@efyU6f-1P4#Mh%b{ZA5^O1?V)l zJ@felzVHwd7))w6*&`_tmNhm0mG%sP1D~`5=i6syPoMIcz6Q7<+a`Uq@OcW&z-G0n z8tQdAqb`hEPl14U;Kn=pbBFHu*iFiax}^i0lDx(}B|a1$3i5^+$26bS2|m+}Uydhk z#5rg@UsA~5EN!ipJv!LuhH|PISVKcRs9@y1DWJg>tfipKiQ1&}`K(a9x2nOYMJR!q zyDMsZJwpmJ?QuLneBwMvtpt+7JSbcp2_FZ2egek`eY5;PK8K>dl>uNO_++7b&z&4s zL`1;UoJJEw1KoF1v_i)WtW4cnOz<{?>)qY>i;JE+=BGQFMHXXEoSaZeIsmb&h;qIQ zm{^*`khvNCV^9k^_yWA~9~FK--Rhw6Iv-r&ivTsSg8~$J&=9Ra*Oj&5d&>tod2g4N z`03ID0wO{T&_5l;`rJbD)hi}ljAR)A z{nUS!C+WT!VyW;Q*8?nyvE1uzcHQPChIujPPH6_@0V5>NR-Q) zG#mN|+0#VETKhc(muB82Sm<_no45;5{fMX$;9mzjpRv&w#%4Eg3op^VR!_TXh}n3!rUj6HRMg*3YQlxIsVZ#_Yc! zLuPC>DW5;Bj<1y8uGjRTTPhE%TZoyRKS5-(5($oy?zYf;{QBKU|*l(S(WMpf8M)2j_%i#M_4y zjhn3>>pDdWPAfff0#vKH`T55ehYQ);O^3h0i;ExhJR%2e6A$ks1qibD+Z^qDqTa&8 zluRIOT}Y;uzdMU#A^qWNPsvW;uk1UL;D$l`$Z8mYo5^}$WWZ-5TM{kbP6viD8NvH2 zizZD~M_E@Ir}a>O)MkyAi?MJOzJU5-3o$k237C38W=X_8asN+dx%elu)OJV~1Ucne z=1OK~r?j8nofYU*{O_|rk>K*|4^SLXKDj*m1B7UDOG-dc@M%gFp@}qPSotN(MsyU2 zyNF;+H&xbKH6AOyy1WM@133X38((;xSt9+wy_;zZhS0&^r&)C1X#svclY6XQFqYYc z9=2BzC3uIQL+=#3KWaPjR#P>#GU--i&a(4gyiTR(fRid5GGeYV$l!_Vz{OK?D?^*l zkV*$z6NSmdR5N9;zFR8!L+@l*n083WJQbCHKd4d*KA-M>Y7yD3BgX#S^Jp>C;K742 zP#gp~kx4kI#A3yd39UjSnWFiQR2?s=`+$C3w{$`5+PqCUmZjZol$i-&W~*vGTiL+U$R0f#O@sGe5LXP_bW;3rq`7qevWU!jjMu z3A-X@%f1J+WO;6o&kGc8WmSX_{7Ma-UTn|HX@^Og%X}geCIV|42EH7XGA%L_iq-cD z{8PC#B`h<)PxCR_;loCNN(sOUuKiSvx)Vc>#GOIuWw0~PP@6>r&&o(Pns)jb z2q>PQ*cgD90dHLj#frb7Li0f?6xdHxOvTubVf{roV=AlI{lq!EAI3gBTwUnKazUKo zta_wNc?CA2%jf1rUoJ*I=9bV*g;=v8?eL96RTrAX+u`Jh=}miMW@fXerqtA!^#vwN z%8G@0&lmpw7Bl5TfGj)PQP?8Txmi+Xb-0*`jtCseTkN<|mUHWcYB5!;A7Fzrjp`ki zj|5&2PZYfhN=ZRWEP8f3V0T#`tek2)BLq-EWM>P0UGK9)pP1ybc$YcoM@B37^UZne zhaVw?zn~h~wJb-5Fq#(*`~~XD-rntM#Xbc(J;~EhY1b0J^8Q^0Hb#Rk|OqqMZqJN6=0j9N@STC-HK8xmBAYd2kR0 zh}?R19O~!!MS6lm2x^~9@#)9+Sia4yd#zJWfmsFV`SVBtm&b43J1TbVfs(!P_G;Pj z^SXpP^f|vMDU@$6jyKYQdt%7myIF)xqOT)Sh6AXQ@35G34UZM%K6B7!@ zavjh6bL*3_9!!PqE_cr(M;F_|t7sz)6`g?*E)r3RIyjKxA-;ca2WCS2J$35jsZoI^ zA}*!I$udlH+wk#94x90E>C#e;$r;(#`_cj*3yceTQ$4ayz#&|0^Q(~s@JP;=q-z|#pnCcUWW?62P*MlV5Z(i!QqeySJ7YSxwst`6EH96#Yw-5K zfD6b1v^_^+z36WKK`w5+|EwYRpFA#A%MY8GHP{5il8pp-URI2)gb3()cyqxWY6Afm zC|m)80qUM!c!(rEkr&EBpL>C3ykU)i4SiEj_z7ajCG3dz+byO`19_HhGYVq0%QLJ~ zbHKHY!(WvYgohQPckqgm#ff6-ReHv2q@V`s8SS_Ur-pGV56;CBtb)1Pa6Y>M$)~uM z9aLQ>&%%|ZP~l8I10!h#*hdb%CoX>53k*8kw`*PJ*DP+SgL^iBk4Nq4-WX_Qx-6r+ zXmcJ4g;QI`9gH3kvd7e#@|IQfejnm68BqK}MxbmIUileWld zMyg%533S{GLGvdhOF;MiM%nciW_ZH>Xg#Q!kbPuBy}Odwl5K4NJ9#TlB+rbcKe(zz->AB(=5=M@B`C6;3wLP^7L1jVIyV11J{Hf23uZK zGDqe(z362;?>_6WgO{>yHdalLI>JcSX`$Ll;^sc;ia6UCU~{qxA1a&l1T_?GpD^%7 z6XiD7q{u*E7oX84h*xJ>ALdU9SF7tJe==3t`D5J}8YzO%Hy0ztVK)3NMnp#q&We5U z{c^Ih$jB?ebpyf@N=kUKPr#__!jkaNIgR^ITNE!p-zb=*(7f|gPv-mA1&I|v&u;qo z-*t)5>_h#6P+Q5LP8Fy@Hdr6J*WyA!zLR#Tc|6(M4R^ONT(9?QzNhtBScOgZLj z3iN8fyH~Q#Pt9%b6}~An*4(&Go#$Bj;N5c%t+x6!X%n9dJV{P=2MGnadA3?2!oty; z1A9=}3!N_!+*bOQmX=V$Kwjv&yAdtm3{nj(#dfoxz2P(&2>sLL`|}o?3j=XffVBw^ zqSwAamk2WxQwfv~L1Q(j9w57K$lv`Ma)ralcDC~ztX16WlKq7cIj$4UcU|Y{zet}yK}6u)7aGC%*uO^1Hr0)EXkZ|gQ{clQ^7B)Ro)xEBQLezPCt8Qs z?;`>TpY67JkB;!@QL29wR}$^fxO{DWam;Ev;|009d*j#@wu%ob6CtQ zsF1UZ9IrDz>{sH_xrAJLGXI%p`9@O)^H^f;*ur3es!UsYi51cDj>@#+g2Wvnir(&S zdeV0wX`!sDYLoi!mdM2KVb#E68xu}1At8Z8)&t{ZyVEL=*%pJAJ&~%iGL&)mz`x#J z1&^#Sy31K{v>`8Wtwd`+i(_NoOgb1VI7#Ul85J0VLGBnW>+03T-? 
z^%u@P{!~Ztj%@@2Vf%85^5lsVKtripf1)>1-B*zzH0eo3wN|`3-kn*50~z6tM4mV{ z<{GbVNyt{STz$pUk@$>g*_?IaGk$e7zNE063Uq1GR-VNNy0{;6g$tzHi%^xKQSkM4 zOoHTet5wC9yN%T6rT!YFZ7r^a>$~tcUWSb@=uN@v#>_oE`@yE0w|~E%L5lCP+&opwz- zO9SltDzy7tIm}Lwi3cHRZ{BPcBtD6eh#vvbt(&y8GSC$|-^*q49`w)D_WFx#L_|dD z>+2Z=`S>)8%g6S%S3y1sQ;$K&`!h`93=vTTk4>U2x|gbdaL^6(3)*F3uuZ07>F0d> z{RQk7DErnnHU^3{5Ct786n!@k-h<){<_sD|w^&#*zkc=X-Nm)G-XVErV}zh&!M%oC+IYTB zggRox2hbOT={pIhLS3Gk9ncP@3pKuGTK{^rCVT0iD8!i zhWQktNPWC7=)@2@-6dc4ebzN+)s>glx;gjR?@QfgBm=x}!ZcY*1!bJNA=Q<(%wVa) zwP-_s{282C@&so%mb<>TPIzhRjUg4)3xl_(#=KQ+=cXr`7K5R&(}ll%;m0f3&T^r| zYI3;BTYan2i)bzTcO~%f?>Wjlxe$^FgerWWO9J+)rKj?@_tf;4O*X9v2)su|M*bwp zA~|QiIj)RJR(Qf*A(Emb)uj=RTo@=Ze)dewv`If&Ui!AxbvBFSvKq(PjDjs^$}@SJ z+;)71z+1o(4CS+>ZApO|<_G7IUk4DxhQrX3(Re_XgPFsG-b51&YNl)yk`xcV>UgTQ zatsqUU^{&{_Ya6o0(o-fJnlIx~vm5g<{bTzh|ZamB2@JASD4HeC188nUS6j z7QL>n?q{zDH0Q1~!W8+_>g!J(gYW7(tsXd?a zPyf<3b@%P9lwuw@!lsqIjq0TmCrrqYl!r9*@HdCi`2RblyfzUhO>tecH z#wZfFGgzYXG#q~hrbwBgG^TAgeb z5-UeM>?8RlWrR%(GXpy9ww1qSYMpW52he(F|8*?=Dw02mu{g zC#z2YE1|_Czrbyu`%9(h-t1+BJeAf~VNke!d`)XsnuJ)F*{@5CwG)KLQ!dv~7|XFa z?4gfA<%CQPjsIzRDqV&9Eb6@9=sNsbQekr}D~co8>oZLxT(<2GZ9NpW>MZ37l4Wl4 zY0rUV#ouC^mGD?1m&5f`wS4-ElOb)=$FkmeY0Vgr`)g0QDm;QA}QnP9Tgl~79AJt zXr(}kDVptNl}ouBcavCH0DQsURd#Kb)Bo3eNb zKinK&S+O4M5^&pd=#%*BVM0^*gg-wjBqW4?vaL|sYs~`{*$J6CTJ`0pDD6dq&;d7GT|~=(83Z&G;C^k`-<#r z7iPF~uH_S!6bu^sCR1=VYl!$yG5&*BMO|H{>Qr}a3Dy+sx=@)U_%{&{|^?+!tu_KiQ+ZEAIZ}VzzwJ ztWn-|EZy_f{JYo=(l)57b_5psR=ukFxvF~+ab1{VYTO7tzr0#Hs1s;oFts$vi8=bH zAQQkI#dfiLZLF(fdC1 zuX!db7Zb~s_Jo0wr}}E#73KRwZ#RnzjM#(U9Vl?OmhT5yOF~;^eO;aTK(QUvKEi?~ ze);HIoah}6qlT%@jBABsJw}C~33Nl`?H6CKa8j&L+L^n{RvN=+Q2)`@YZ~?h=W}50RwG|3KkeL+QFUI~3JoD> zh6l;1aF#Y^kC1hZLd#gFTMIS;v7?TT4&sB>^h?tqX#ybyF_OM?Xtag|WMJYi1fYj| z0=JV*BRYX;ATq|eI?$?pkYa7|(pX|U`>RsJdg-ubz+JLrMu}@v^bVvJ1QFWXRSW1S z`%(jaBZJA-8-i_)O;i_&XQoA;SeZG>ylfvuCf{YF{aU)}TYZHzjI{ekQbD=sF`p{N zq3EAQ2IFb&AQOBACHI!fwm}w){`2GG<3VzDq60`H-oAYsf`C{Z~~b_``7`$oV;qKE0XER0X_XT2d`FKy1L+^|51EpKv@8bSK7ik8Y^qLzq)rDaussHC`fMZ!fIx<;C%6089=gBVCs^Za~@ zUS|FPMst7&K$OWwmJb44_@id#kSuxg{=$cbAx(pI<&U+?!HEo=8x_=fdyyAVZ6miZ$ekY(+A+kbPRn zFZ1v3awAVv{46m>co*1&MAWH8WMm~VMbXJu4=6jDaM7B`n{BPz+9A&T)i;wQLs74P zjgF0`UQK;`y9C7izEqX(tsqVQmha3k`7U3CZfkzfNwl@Ef)7V8$i>r=Sn}iF-1F#|ES%M#uMN6C*61;P_BJ{{^yi5*U?_Yxv-2#I>Rx~ z{MaJ%xAN>G?sDeY>7qARjt<$pQy0~{IJ45_=5Abbpsk#JBOkKsP<+Vl1!1-$8d#379C5IXgJmT?D3&G%i?;&JGSs zIyBNLWB|#4h0n(eD4Bi(T;7XQi96hU)^Dn>PaAFmmiVaCSA^xpweD&gupy+Utrf}F z7OkAe<4OED^j!v`qmjh>eKe6miB5Kcdaxvs8Fte+iRjt;fdTYd8imS+HK)8&?) 
zyiWefbEnU(+wvYfqbH=%&9!~j{2QfxUVyg~1Ah;;62-Ie-aO3OYG+TmYB-hfROXJM zo!vV0-VqZMhpJ9O#rf5%PJ3Hc`Jc$aP@&a3Cbr!`tBy$}{mTP6Ik|@qD=_+ok=&O^ zNLJUhgv~heXO&)5!uoTDTQbYg;IH%oqXy9vd9hPxFUy$%f-zio*O@p$Oz-5heKk+#btK8n!YM$P@E8ksbZc(re%c)YjlC!n({ zJ-gFI@tgupRZZ{M*X(i!f`#4($mer@TIP%yyCrv3#8p5dEj;IX{)D4N<5<_Pi!rek|zF$yKLIOf|6)581 zu0%vCQ^2RzSS3wg2fm|cy3!O_n4Ys}HFXhy@z2N1Iv9A#>#}usLfMo(mEUH$AyK=0 zD8o%p0;6~Q0};BIA8mH`D)CYe+5S^=OH^e= z%I&MOf>?gjBW(GIkFzyvnycrhTM{Cde z0?vJH(Z!I|t&P}j&+6VQc-NagRa-Rnn*7zFZ)2(B*w|qe+5~qKK*ct7Ql$3wYW7vy z*7ZY_&D^Jigc*NmEyH3Z3^ijZ9{qMp{I2*wx$Vc(oV3yr|3*$X9$@~Kz4@UD)h&~M za5skDZ)!VrrJ*{`eRpYLn~8PK=YOC1dUEW40x=wo5+WN|lhdMwv0 zgPn)l{VWkpD5|$pys6SG{f^__L6Q3}#humqT(!cLc}aAT_+6WvKup?6op2SqZW@#S zbI-%`qf?O+BNx!tCNEEVq7)fZqKtfcUsFt72ETc1A(fvz(W7CB%~xTnhb!!i%FRyHWzC-|Y0gv5HjdY=-)Ut7?C#_re zC4uN=y*JlnLd~#VRiJpcu0ELyFvP8|NG@I8f!UFAltqef^=I9--2yrk7Cje?CxtTx zh4HUk170s+o(UM@_;Ef3PlK-YwpRo8Ii?(ma4s9%6j+<&f4t|Zbt{bNz{gNd9xOYg=09!=@^Rh(eBHgNB1?oK72S| z7_Z{GJ6{08t6XKC?N@JY;hb2EDH;n?%Py&^8724vwN)nx2%wp{=9zB=Li&it=sB&H zb*aI~*p#{X_cIVvhUt(VvkM+kTTN=drvC6>OILw`YQjq|R@*dt?oA{Dv~ctUgh*Xh z!r0u~@TTq2=lIT>;n%LEOz0Yu@X1jf9E)+hx!Gq?UR_heMEZ{Wj)v_*|2FXUrfUj9 zLIZd{?7EkC)@MOk?WK(k$Gv;NxCR2Npu+0%wN8*JW@_&S6H^*M004UT3|NjWaI*8` zXzbSO5fJr@f4fDg9ja2w8}s+cQ}M%#vNcJ=fdr;W-*UdYFfo9u7%KK_tJw8!rVx>? z_Ca`b=E8VEZqLNdH?_A6#E6t&WOT>1$W!%*_BCilW>L=_Yfn)WB^z=--1j(kT*s25 z2XI?^yAq=$;GhH~>YjQ3Wny9lU5uZasTIcdR2-CV!8~GjB^W&Y{>s0Q!D6cnC7~JV zuM;+BL@q42I*;G#w6-B!$luSeaFcmSIyItyC@F*Qx970uq3WWgzNOpq`uR|X`s;2Z zB?8nPl2cNsH#C0uh?~xQ>gvDGp2P1g`{r(*YV>K!qV?mWgv!UN1xBmCJ-;1`5+F^16LC&_;=^d9YQK4nkmPt zIj*MX<(1-<7(t!z`6}cB|BLR!Z_xp*Pg}Vn=h*5AHrd6i{ zlRbrTzU~Rhll#smiWs>3M2FRcHf@k}+0r-{mtNL0y5Vi-@&A-T06!9LH!uK&YZ@|%1R#}9}+qlITak?DRcDI zrqP-JqG&DQub_j&5dedB&t~Frv<)yXi;11&dJg<{5aA6+3=cy@dUN{EWdBWwixOHB zA}#~d-Tfg9mm$?O%P|X!MTmdOvzs*GJllOy!5inhzda|3C1!glCH3wEN(;0yr67Z) ziYaF^?o3x7E6y`+X79>*ak#(eUSQhwA=@J%7N1!y`+?^@&RUUP9+RVC4`;=hyfDjJ z|3F_3eNR5#ZkVCHYMK)p%ac*$(Hkn>6z>g(iK^8h8=G%k3pvM5quchX0{8tswEt@x6x<8{J4nlUe0~<{_Ce^ zx^v#RS1LpP=P1H~7h zMqLE)8)w&zHYDq(&FEog%6tx%i4$fs_$g;_JAw4Pb0nA~%1{(9d$*sxwOm?OFW%Jj zOlKY91c{04$Do}P*)bbs(Ic}4nx(u$$mb#vkb+Ish+5P$UayaA%?|<0^>asF=_XC) zK1bt%dhPIhx3bku#O@6oHUB;&7fz*VYh>Wju0h`j{+j;5nMQ?haOFwIgMp6s9 zB=2O7u?lwmNy{%6U7sTT(x?$YuXgeP--rDZHSzM_ir2>p&cCPRVdv%5SW<&3kn>QB zP(L2vnwfPdDhOJo|K_m`Q$xzS@x7c2otYGaLQ$rGaHtT3aQevb@G|6rE?i-Mb9%bx z1FJmWsJ_g_G@@G_xR7OF;%3y+;l4o4<4#X6Lg=eIZOfl+buIiC?;|C9`FX(`vr|v< zRyqXdr0dcu29d-!T`0}+8ES*9eNDo@-&?Uu*$2=$ziFSSr5PQ!!K7CM?tGtmC@eKP zqlE;c_d>it^LubuV1G|eS$P|OvWf{MTXdbqexlC%<*?C>hs3w&8Bb{Ve)({bSMipV z@9pr@Zo%>FS7>Nu!Vi6dj^6pktI`)01 zvOqas(jzV0bDJ$!JIq_;*^SJj$aP=HYc7+bpe=6xFp99=@W>Ge73hrDBp!%eDR+O` z_U8}8lPv7e@v}N@!OO%XWd}Q^ASao3pD9xhjG(l%^jA|0b8{7wS^bDdqLZaifCIUU zCg&Xv<0c(MWvuJO2&IKN8nI%l@I2pgtV#e1m#6pxrPS?u!zYkf0zX{r2r!dq)RUC@^S4 z+4wAQ@w?BoGmLH{faqK*rmHsRcBvB3jJmj>xw9c7mY8^(i%U5qhTlY_0A+uIAUsIhfJ2rLJ>A{Ab{3RD;E@n=cX%L`_>@T!0Iv+>&ArL2oE+~|Ca z9)qAPh!(5}e7d9gonG5#DoHh6E%whe=T(0>0(CV7g^`7Y%(yT8}0`JuRp6+Vap_e=vV&GLImq235dwZYLyoh zY!b1ezbK^eE?^9%)iHeh&izF#&xC2hBXBzDA7 zo|>He-gi|fe!4ec3FUe>$x2@A#-0&@7VPs^_RN>6h$vRvpS#&z6X&lRxW*Jnt zX}c1Ecf1`Ly7>;3J;=m%x145jwONRwm+c2%5rbkPFk83uY~EB`B*V_=@UGO~z|Q;Z z8Mo_>t(I2*NVOlqi_Ol#!7`{qa$YP|byU85iWFXyCkoz|WXzZJ^v6km9G#tkNw>h; z;@8HadF`*;*RTKm{psdf3--8{!(n){c-YtLiMj|M?_w{u5Qn!TV?a?iFGUrZpQwP7CbP=;GRoIzZdOdL8F6z z^uKMYXant?r+tgg$#v5en*ueZvz+2Hm3*(`STdd8c2*(rmIBKb5Djn-={9bXgIP^1 zzxPH zj`{lWrCMimwxK#3i*r%D|wx~d!MxVetsD;tT zJRc1I6c6rl4up+Fc&;fVm@naXv5v=ZCK4YG!^3?I_rglHKQ7u*m<*d%E5QtS@QAY27stnSf{0ao;cT6YFXT1 
z(IJDm6pRQ9gI7Xjv_q}5Hw|-+Uezf`LVW#7TjU4`1OQrRr;4HX*q8wVFmW#E!gWu= zq=rf+r_#aJR{6Nr$5FiNKVBWzMibw^e}8nRWFDk##0m$LYq#pN@)8GV=7#6E9X{?) zJ(yq2OR~$NzAxe3Y39gZgMF?m7j@bO2L)ZWmSAoB#n7U>0AfN$?|VZ;csK}RuxJ%t zzt6!G!<9b9 z&y}zIu4`e4{n}sqOAw6F)z|))-dU!DZr=6l58_%43=Cku2sK+Mb>4jI?d^AqmR28Z zO|B(W9R`3{T!YZo-{bPec6RywpeXOKSh^`XGBxD@8Na4{kz0_;U*6nQP3r&~358tq z4k;tX&!4BrB<1DjlSN>N%^)oVwDNDyULn?tE6V1P*f>TIlHk3)o2>kLPrw@;y_`Ll zZPC8^ltO1m4c%1L(b<{!G$8YZTY-SPox=N=q==FrjpWw}sSVVo*6%i`>bqq8&Ryme z9Zhe#%$I67yPg#&8BlCIu%?z571r`ujPO#KNTgSxkLVSmqE3|nSz>#-1vZnP%>kJa zean@f>n}~R8q>-n-E1VU@R8i@hhFq^JS>iGtG}P$d1DT06k!-^gXqd3 zwM&bsaJU)3PfL`gRrIyP)+?OAWHyfK(Pp%=Yko`hrR#w)Q5;ZsYei$ltaMI_0 zA_IehW@l%;$jC@ZVKcW!D}9oMshE~k78nsH?$2vG^)XrTk?dE0S))1E+zTzmCFr~- z;Zp(4pP55BSlR;xKn8CobuO%FK=^W%!|Ov`U8myhaRd+H1=}(k305{iF-Z-r{@)(O zU$?)e_B>tCW@k_cA=2hn<{WxqeET!~LG+Kj1R}A=g?)!bDQr@WwZjkBt9yja&nWy7 zo3*3L60KE5h{;lO32VLedE|Y+#|VZ`NiGV8aSF?^cTwC zdRp^Az-232=^c1!ka#S5&AkqCMG&m-Fj)s$btnf*JsWe`SyDvgbpz=5j7?2F>qN@L z2%2rE>UlTr^IlNxAvn8g$rKZ{WH{~FG(T)Bxgk9;!b0Ou=|A<($99c)RPpk+XgZyS zK*y)swC>F}*l7$b?6=nqjrj70brx!fCTq2{ex0`PSFou{6k{)6)5skC`!LGEH+hV; zyv$8U{u*w_u_n^X%4IdDzsJI0LZ6$Tr;HYT!8p!H5ZoZ9ua)Ugn@14S5%7nvh_80o z6|yjSI<&WbY%zF5MQPW3Cui4NRRknCE)nCl^IT0$O;c0z(IcO3G?0H&V|LJ6Xvlh=Vs{7X>Y2HAj0X;=Vv!kER?op0CuA z)zmy4b3LZ|FsHll*5r^4{~kXntNOv@cvlTlaHrOx)_MLny5kzZL&kEFG~ zKYeZ4+U>2Ur4Qs+WUgwQ|9E-vDZdNt>2v3xFd)rEX6+}5EL0W^*$=sfnC$M%W&wwP z$t2>}?)%GZ`ZySliTAvWuneMB<*pd4=5Y1qu9xPitqOeX)3n=W+C}HW`9dS!g|Q)V z)WKI$IphNuk63n=v8&a~8fqqindr(Zqd1gb3A;Q#o`dA(WN8z9cq~MKhXS zzhal@o4sk!nLtujw#S^mw-Ij%-vX0&Ez`&MBaOHujk{l{Ye&)3X#1MhUtW zw`CC}BxhtOhl9V$EGVe+`CEQ|M|*pGVq&-;q9;crtUvWG6k&qjuQ&my8IrvgrRyQT z#bW*Z{E`)WAeQzW>^THu>Vlad%!2{Q-O{+xeT0>PPlWLsN6@*1h<MH~% zJe8J_4>^$A6L^&`q@~ta+|d5}Q{67NaI_SSpO9v8ww>l+P4j>Qjq&*BXNG{{OOC4` z#O$k(qKLY^z@Hz7vBO0LNXsuL=lRnei#BhsbBZM?Vm=0)OnL9=!nNFKY6ci~YM+h! 
z20Z%8@l`B#i+rV)QG)kQ;^^vH`diF5cM{@XiV6yVs5LUqpFP{q z)MRdAGE;;Cp>0-fZnenz=H^AhYwig(fX;ckyA;6#m{_!-w+LaMZy%5}lf3#NGvXT8 z$m9xH^ql}%1;Nr>0r%?9Y+wbzT}Xc2lgdnm+dArv>0s5Le?2E+JJ{jp7VC1$00B;g z{N&$<8<)gqQCe=UJ(XLng63U=HJ5x>9#4j#MJc%~TtW-oEbdQIB>F4%^kLyg|LvU| z)C*osw4JGp<33U#hOB;bbT4RKr-VEvtMTlbbqxju{Xm-ktS5-+`@|ntY1+@Ut9pPO z>;xzj*mRR__|;o!KN*xkjsID-$Sn-|0tjYp<}sF!nmBB-@GNYqtB@b~h(S%$I-e7S z;#V!#U)n@pbBRaIu@rE2rX1XnX!RLDSYE zg-}NrgR}9@YOzii4yLiYpZWCjmqeoe-$3JcgKI^fm`%$NH z5Fdj);UxcD1N-qWf|C{CECwh8Vg(3=JlnsngW1C{(DaAKG)hN*^=1=w(@hfbO#WPA zESSZDNu5>Vu=K;(ouB{afM8LbqLed+$v5tGT1&O|9-v$nN8YFHEzp!uum ztls73E`Ay=KB*MZIW0@{gEx~S_0F%*!tWA@rZ!AAd3OdV# z>`u9=>@}YW%i7n^sJVL~tmLrYwb$=YC#CjN1<3=6ZT4oNNFh$kc+B z{lm9!PxZ*bzHe=7`*}NCud1%T9#hdW8p3(`ES*f`N5`j6gqaf1a(93-^1PD zuZLAQi{UhapcM`hHV$bbm6Xzxl6&zn+duGEAo{{;r9$P{`Qbm6UM4=3raB`wRmig> z=d9g)^*kSjNvyQwT^I<}WtWM`=8Uyrl|&->pg>nsGh&H2LxqV5s`N%iM@>yl#T+4+ zU}C~}G){OrZ2_h6OshJE{Ob#f#d1+DG{*2MT-F3%Q=WP*VZz~#VzA%p_^#_XG@fBC zH!VJ$o0ca%RAXiT&W&BeaWOGD_E|sLDpMzu7Qz{Sa>fu4lBlStFv;g-W}a(_70J#1 zKQWQfqk&UnOW)kda_I?O$5_PC-L6$Em|VcE+I}sm!E3~;t)VHCstrfh6jnU&Z_SVQ zv$h#89`>BL1ONtSCkTi7yise`T54>Eacqi1cJCIrU9iahNuy31&M%!-tfCtV5%9y} z{m5fEgK4Lq(~THsO|Cc@l=#JTqI7q|B5%9DGxtC>"Cluster API Machine Controller": AWSInfrastructureConfig updated + +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Machine Controller Reconcile +activate "Cluster API Machine Controller" + +note over "Cluster API Machine Controller": - ✅ Machine.Status.Phase is "Provisioning" \n- ✅ Machine.Spec.InfrastructureRef -> Status.Phase is "Ready"\n- ✅ Machine.Spec.InfrastructureRef -> Status.Addresses is not empty + +"Cluster API Machine Controller"->"Cluster API Machine Controller": Set Machine.Status.Phase to "Provisioned" + +"Cluster API Machine Controller"->"API Server": Update Machine +"Cluster API Machine Controller"<--"API Server": Response + +deactivate "Cluster API Machine Controller" + +hide footbox +@enduml \ No newline at end of file diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure7.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure7.png new file mode 100644 index 0000000000000000000000000000000000000000..830a37899212ee8f44c640dfd75e14238b833fae GIT binary patch literal 20249 zcmZs@bzD?k+csI*qp9tCH!Cx9$2-zyHfHv(9nIcQnz&KEb##0$$iea6>a~rdleHDQsjW2@ zrvND!ct z5FjD@=lKh^&^oS%Cm}2E3GCy{(Cey}dwTMo69mgFAKw*)Xbu$Gf1<}muPA!K3&U(* z&5=*~tp7Qb#y9WHX`8%8>HY8bjvRN}<+Oe;#0F;YE*FoN1(OdRr_l<5F3dDB66XTyxu!~HdzAAYgT?e=UNYD z+TD}E9Yv<}DCqBzVFPz0I2E}-gNl)Ejsjkr^1+v;7HY4vXQpX)r@bV2R%BS0FmTcm zb0u~J-YB$wzq1jTu;XzsyTTxIQ={bECz!A&^uWd}RQpYOq~fa0<@jt5Ug-L_70>7Z#1k8*KPA1D4!mPd+dd7e5!;s+?kgY`ibq!L4A+8VNX79k(${ zzn?UB_)2r0z++zO{w`nby)2DOU1p4Sq>Dv4cG@t1Dos*32TU)}3~uEVhdGhuml&rzGxX>iwoozz(>#o&(=X#DWW*tFKUyXl zR#b*LuTl;d^`=wX`EnESlz(hZ2!m79tub_&P%7fR=`b1olwp=-lkEFQFVrVZBUj_R z*X#40I|??klFv060>M%0lXFm1|2|<5O zC&l_SB!Is2Eo&Kac&Cxc4l*w>uhe|fgdg$M4Fz}4vVBd%CYiLr>6PeU(BLNc*T1>x zc``|6gLaMKNBPhXH2`f02@~l#W)#YE@I#y!d_=knL0mzC{@+*re)WHU2Qw;9HTVX% zloZP0#Wn9y{3JBr$#B0I%h8@1jZ0jw^iUoTuX4_uGCOUCFJQ-r88yeBoJz5YQ(Vs| zfzMWtM5gUo!F{r4??3cfSzDt+??T))G$_$!?!)#gjK6H^HG4M&1qHpxD$poYGcn1& ze4vJ}q^MY;U1ro1L+5shA$EyLN@`H=2yt+5@bS6Xny%?%Pz8&rDWpV#Z;oXQ;7JbUiU1Z#R zohzTXyv#IfvbMH1YLb_icc?Q`Z9P3*V;g!TSRP00B({DWH{_Owv9VDDm2z)NB3s7f>R@-F4IdxBQowdjIJt&oHfz+z@lnC<#(0UkQt`_j z6TJrKlEa6T2lYjX2HBMj&Rf}SOtIK>a!5!>LwRXUep?*l3v}!W=S@uGqARpRnhx^U$4iRr&(P7)b9c&9cr9U{ce^NjzII<-oDi^R6~Ar^qEDmn=t&)Xq!B?083 zylfl>rJ<{p#l>$fJ9CK~#zx;g1%!o#8Q8eJ&K-#mc}SIXjHZ8(`Q 
z@veKf1)}FP@g%689Pe7jD8QM@`7pNOO$ z%oH2sqY(2}NQL($g4MI0s?3T53lnz(qtu6%TTpz#^*Bs0R%-cDSRflqAGb1X+l5^%GE6&BtqaoU~OGJ1Idchym#KOrL#TQ8`keyc2E6dxoJ(-Yze&xv#E><$Wk*Ggnt%Up=W48Q!2u18wrU&=T-XPEHn-)8tZ} zn5?j1A|kt3?ujKGR~3k{xnC_S0NmEA{Sy{&z$kUZ{6E3PF^o-+M+(fixVeR#)|dbM zk->mk5g@mJVM>peEYYO3Q)?j}W~A474#$>TPPPthaytmLhW(#F1}fTbEVT?w!ftu*AGl#6v~CQk0ihpN!wy%Cj$;`t@u1tBaY? zz3#?szcE}EwyKhIqmZ}??JFoYnbD?6;Y{k}oR`V1-*5xJsh3XKCzR&5uI6T#)Vzcz z8uKt)cSVq`6{zM&K^$>6d@j9Bl_Pr1zPs!gWr}(k7#O(vEDpNE;Bg0<$?zY>vF0AN zheRYiZ^b>DPZvXri&@v*R?cn<9bx z8*x@n@Ls(7SVaW|;MNx5>~#$dLUgsxTPgNhxa!qby76(qvMtK4!Fw>}r`fyftEgal z_|qP|U*rCk$WUCEJlN~-xm=l!KRLYE$nuO~j2Ln|wJ9pak?oqd!_>!g!PlimP3{9J zytuFK8+dv){$?8G2*Q`i`SH!HkCA1)ZKBLL(WfD}X$e;EQjVbt;~=k+O6ezvv?}`* zDjomV4&er@YsWo3pVqj(awK{ zsOq^NF@KbG?{D-tVMoUqm8J`sYVvUDwb=dLl44}fM?UO-JXWM_-0ZFBGnpnEvPxmg zXFI2$EPJ?IW!m**>gA*5ICUl4ocqAgW@ly!(AUfTe|M9USCH@LBy5&jl`C zX#XVBbN^8Qhw*n-Zs5K}GK)|>8bxa5a6JVfC}irY0B!R<@ddev`{C)HDc{2wo0(cF zSJb1vEHao3i*_k=WO8~M7?9q|R_BV{Zt_EqlkI`mJ5yDb+SMZxl~O4>kVzy=c+>ib zMEmiVnr6wMM2B+dv~)cpGJ!lxxpi}$(XRZ{yXxJTRbyanB_1CYv zCnxi@b~$N2lcH|>Z{EITber!8!OLMEtFoN9|KQ1|_*kXqJ-DdIcQr%vlfX6D5(!?U zpvYA&XUA^S#mUajukxJqbTTJa0)oWvUKg;hbopsq9wgyH>4Hw#RKfNtnK7lReM5%Q zYzn1cii$R1x$^N2lshdgEzjwcgNvb-C9J`*@4|W3gyA(w&N3sF7gzK1>SvbY#XWo* zqlFTwxJ=H7#o&eM*o->KC$gtUM^mjj=RA$xR8HW%2l7#3M20`(j<>u#46zi&IsU)SQ^1mB>d$2O_r|bVj zNB;$zZt>E8U4eY8#Pt{Ce8dtbawiclP#>GQQbzyK3QJ)K^(lyGtE4~aN2R0Q@`hohI{Pog(}`9tb( zlqa64zIY1q#x*1|{`Mi2MUM%D=Q7oxo9|})s3R(=U58&I-y|TxD9I?p&qsnV#PFFs zDmijL(Ud$ia@1*Qrt{@_C^K?>9h@@jv$OBajj_6#>V+M{e z+3D){eHpn8Pkfwq`_I)UK7hMoJ#G{-f7xpE77Xi4NA?j)YlMEsLWj1zI&+op3`-F( zh1XARzzD3SWHt014ymJ1?n3N$G6hV7!p_|L3!sr2G{)hiyp-rDeO>xoHtmN69NJQB z%Cuji%cLTR|M@oEQm3+^z!A-unpF6#i1XAWM4T8cA+l!q+c5VEtF9CngN@#L`dio% zz^%}pCLgtYwoit5LT2!|PwfxwN2!%xB4%+t7qi#b9AflM_}v<_{b<6rAQ}`P-6#}j z3t}esVIO$xxH&n0bQc+3;Ev}l9rd~)`|>byMB$SNO|?T$TLVlr&Y$9J?eE*CbLHOj zB}&t|A8$-_W#c2Ye+gJ4fEMwUH^LRlk%ebHoTwug_dkFh*FHZ>Vr z8Pauxg&jZk>G?Nnrtn_FIk?&_`+AiIyVho^RI66s02I$g?mb z^Eux9alB<*X+@;+IVvw!R5p5UZu{b-Oe12Z#uoYI3JT2eEo|bUjQ5dhqx#ST2)cdiT!gPP7+ zNz2A3GaIqEb+6m3@USc2E@Z~>1m!ooJ2~j+;H*RN@pGqYY%lvbF5-l&2U73~cS}nn z+O;!Q&yR+`f1ks(wY8mX@+e|ZJ}%-<*C7t(i}nfc#>S@iaDB#t+wo^-AFdu(#OnI) zs3^z#5$(Q)?K0_Hj5u+ezRbntL8;@k`m4P~I~wUgOA${1-&cmR#{BY67lO))w2rcy zIgCiNhQI1oxL25?!*zLW3@$)LfNL|7cXu}YvcPG>Jm4cwjL)+udK`$TXhsl5S@c_qqz@;(OCYQhQAvM#ZyusL(1a)Ae`j zC=Dv#TSMaTzA{``U~6!; z&YE*HfzVD#Uh_^>G9@HEKNIu0zJHawo4^*jOrX4rEwfsP-F?RP2>QIvuae(ZX^1Q% z-YKn>=S5xZGnFv8liVgAOeEwna-M>m_T9Y!{_^VTgmgh(!^*d;v*9~)>5C11{u>$3 z%8lG;_YWNxboUnM*{r=s^1OSct!D^nZr)QW(!&%KLr1E9K7z?5vdd~yO>65<89pOB zmc^GgucqiS7x{x=_22%Dlb>D6)fk$wx+%z zKD*G)nO01G-(MK7E8HSanVv64xmrUTg#t%~6)Y*Gj7HkR(wp5J(t7hxf%%Y=|HPp` z%wphL(_UB3*fg#%M-f?vsS68OOGlQP?R^w9HKpN-$;zT4Q+l0&N7DG~uULx1#!I$s&K*khH<|4+y2;#xqj70gZ%sEqPkV@;I^~p5n=as<}c5`I>ri=Qi?#=e**1%X~9zTLW_hZtV zTLu^UOVLTpM=JQ5#o`}`1{eRy5)yPLL^dBF7Ib~jposARX1~*sv)?Dk<&K@nZ!6gq z(ZaLa(HVg+EZOilzfH7&kzKFhv8yZhnY3D%HQ3c!n%OBeWGHU z#*dYu8K&Paix%`QsN4@_sAXbM7O0AHlfMUv+KxUxwF?Y*S-3jrqvE~y;_2pNwV`hfp9b&N!(F7Fk4oe)9On|xs=IhHk5RsRGVhZcCU`ujUn3Ly7UygN zgd^e;gGnKuqr{Kv`C4^?%uMiQrhI(hiMBF1K^p)z6-ovM)x5bugRbok7pM-z#kpZn zY`Yz-v{zZqJ8w;8YnN49O=O7l{Ry@`S}y<&f3xEg1gX$0Iycdc=h%@#&S@5w%4zeh zw2GGc{wXt4x!F4lgujsvcM$y|$mB;E_D1laKgBVYMpuZ4`dBoXyIiW?E;L(CsD&^P zdY-}C^{SZ5PwnBon=@y0K6|+x0VCF#aj|m~6RAq)iyhIi4E!3!Itg7n(>2)&L>Jc6 zi(~mg%Se!0`?a3J-B@3jp@V)KHbyN{hO3BG^^U2^-tNF1*k?}(Jz!R2Hze*S+epWQ z;weuk0P3`z|Nhj?oK7yu)vZn`?N_4DD&=&ukJ(aZ)mm`UvDydhP%~bK6_tQ&!Yj>) zI7FaXH7+s!E-Yw0C2nhb0{j3_3szI3I}JM;H)APMEW}-t3SR5j8xzt8hCD+fIAMhl 
zJAM7WCuDxzs-}BuYma*q*i5ag+ST8=FE~y!z@cJZf`wA(wqIz_7efgm)kQykkyy<& z+}_UgJR0Lkr&P>VTsEfRvX2oKPV3pjPJLfSo@4Jcs&1Z|pQd*QqT=l>--cm=rwB6q zX}I*?tVxK5$Rhx~_}>MY83_Z`Auw)gbXstITj5O#dEJ=v?0Ew(T6?!V)oZ8L$&iQ5y=VE|J=uJ=U-e`Hq z#}Xrbr0D9krsb$;u4Sd=n^$}u!iruna5r@_xFty5^5%VQNYVz zA8Zn;DWD3^@PqvR+_eDa!xQnNvf|IK>RTj3BBz(@QKM{y4y9sFVBabwiP8To=M8m> zZf)`qFCO&cv%Fh|uW5ua%WLL^s2>ps8>oy%NlBoHJRJE|&bssrotkqi?3|PMxwe#H zv>>gM{CXPG>dqZVRDw(&2a8Go(>odewf3Rvy4gm;7OGodi?EP;0Am5H2P_0|g#li? z-zfn4TC1`q#SfCY0|0OdC4ghViU4{B-{B-g06+vn`=3jQF*rq#Wc**dKScaCsT~8_ z!Wm5Q|C$>Z0zqfqTI20E$?t)0B5wV_Ba+eXv)qlv!NGC={(VX|{nYgR^jLk}cSXYF zEB0$)ZpK)*kFE=?JU`m-IT+yIg)QrJq&RhYl{4?S-TC|ei87W#O(oIgs@-9j)dCym z#cIyEzwFcdJ@qbj-}kpAsa5AP4$KHS_)DWqyZvHPz%x%U7cKD&v?6bX4Sl5x=W%d2 z02}}rPyO~~W12;-U*##;gR7KkX0zA23R$}aYS#UWcvI;7baL_L8$9?t9Ew8E{yG>u zo9)smE4)GRS0g7#aQM2pX7O`H9u>Qr`W5KmXgRi$6$2S89$c+%58Ng;bZz&0ddeeB zg5NiN--Z+YX2OBgzfkpkITd3V#Adze9-dk6c_VudHZyANJ)pOdsDJacg}SuK; z21^3VfZ*1qgp1R*C0$wqQ&|Ipw2F!fke#iK8l2f!S^KA^rdnIGy)KTGGlX9&)&s`G z*w|QIU7er52GDfh?#Caf4$1|nc^=F*xI{-smzI_$BqTh>XT|K3+trCzZ}hqG1}xzA z_O_OmmM@C4tcA_Dr`gX_j~8gV9%E3f?o@71&cnttlBDl$`>OuDpEp=_(XRX2MD4rY zK*0Q#X|zGF@aLQqmo?qq89%PIr{*D)3>wsD^vHiOACu_sQ|dc!#JVrAhvtii?#|)L zx*d~VC}$X7u43*bEe^<~PFPRb9}S1DBh$z?dp2);vO6|2DA0ILCU}sC*@r(Ofv6`& zu4u1ATJRdYhvV-p$hWfa7PV8GQgo#VB@Nc|g|@cP`1ep*?z`cjN*ji7)$r#xZHmvU z%wq{jw_lRd`107VJZ%g3 z&yD$Ju}~L#dpTw0!QS3_j}x0_@2mD7Icf2P3FZJp6B4?D;?-g>Bb-z~O|V=L7N?9w zLNX1NjRl;vx)wT`)zmRjcjrUm?}^(2%+EX>iMhUf@$tu-&!jI>6(1;I(N9#HgV6}QTlI+IW6HjexO3-qnK_~i04}N zovfWkLVvudl*$D&H%M=3J)r1k=u*OjxNOg0S&gg?X2y!Ox3aUd@9piKY|qA`MA~ba z=ou0l}A1h*QocWRH$;u~1NJ1blkjV7R^I2^z+uX6p<5-rJ zKkGjqm9G!!;wuSy6x9|MBEcAAf#>Qb*^l5t8t;TwZ>=JK{$SsNbYS4Ko)VrD`|8@P z*Uh{9-~qmo(xY-Oz0m2P*79^lvW8Z*mTuw$w%;UZXT}Qae4Kmr1w**ANVThqTH9e6t2BZ zWs~2A@U+}3zzxgK%=|vmRp!m)@K{Tl#?-XsGPuQ0kt%@=5md!f*rPjVz7<;JqY7HT zS@!j+-&JoW_r@*7G0LCqcQ*6v{giH|v_cxIMi2PDx;4Q1W8>feKIFoQBO)TKV!ej2`$C3}_TlpQ-lw}%^YSVhCoqj~*g{%xTs&rt?*4v_Omf&~#S0rO z9Nr_?GL2$Hq7*BY?G8uH+R&tr8)O<|*YpOSdqt)^N208}*S5;8B<$_Bl$(jIY_PC5 zYSNp30WFq((P?*N^2zM+-^kXtMg=_bj1&{DY`I_@Sn>X?<6GH{7?FgN)1=eB_pveb zTNX8|Od|J%J-lWuSE`cBBTZ=rCC^We^GzKhcv{E)N**?|^?3OBohQy~FgPH&Z08!O zpaKE{gVsjCeHlJ)1Ucn$&2gcy|qL4QL%3;{~Ph zj-rxMo~wO^rqx%fO>F4YQNlvzQz`d(t0@i>0Ur(t8e{_K%R2E z&wopULp%sint(@wa`XQ0%$wie_kT>b$qPM%^VKi9kRDo2eTfS&17?vU$NRm&Xf95j zL5BNOpO25PM8D>{_myBHqA&qfh@b><`D>FG)R9EZ%a$QbA7n0S59`7W%l6b0cf`)@ z)(%FkJ=YwrUJyUNJl`k-kmG253_%GZ%9=GaNDcwpNvUdTS6rD^Y$JuJr_Wxdgya+Z zu>Svm0anp1Sd)^3)TCfM6U0U-X`v21NpxZx$08v35xioz!E2opQz6Rx%zFZBX|u03 zj;rS3`jEg{zv59g-`f#U&t=zz^Ni+^dJ7+UeGe0IC z&1gg&0~H%6$Mp>j`ntMDYa^>2p#-GZc6N3@fBuZiLK5@7xjF#~u#CkK5DBR>i;9YJ zbK!o-k9FQ?X^nrvq9FwWHz_Hjc&hNh$B#(-{QLlPiHS8oCSaF&lYI>YQ#8!HpgR!p z6mJELtc9@mi_2Qh&ih(ZY2|$Bt+;JLwl%1DIoSO4nN3aG%VP<%=SVK77JHJ3y-t_S zGhVcN?eTm~qDysSk8Gl&?9!4(%}xoGk(*OBS%2%~PmuLlmqrgbxUO!Ak8rkys z`;ztfR~;w$JzqxA|M2jfQYU2>ecUoRDLG!{n6U zQGRJOOIvG1>Gn*Q+E44TtP0jN$73x!b#*5&F-G;D2chX=Uw6N>t#+5Gi8Yklm4oo) zSM~nAoqpq%2Y#NK>g1b*AY1lnmlsMl%rXbZ$KTzLm>-L|AAXvlc@iw-vMu8yPXkg1 zmN&El0FvdO_}X>$aCXD`_SVXYwl;r@>)izjeDAY;I+wf1+^W(kIz<0C$0G=+yL2W6 z!a@w?d?r2kL_G(U=hv3Bo8#(`Ndm&d!gU*4+a<|3*9$No7g%9i#dWz$(9nc8^1xeF zKJb|(tWs!eLi`k}>+Dc4x#L2u-@Iw3+5=4q!5G?=C!OIWM;jA2E2*~73NBt=udDMa zIr`pc+|P(|0#Na4Q|TO91NKWB zTy_Kn1+PKZ1-7o6n;T>~_fdxYe?Kvb9^V#XDF%|;a(#5r@(eH#epel?u^66jW-eZd>x=d*)VuvNuaFc~r-XM<|1WJ#^R`Mkzv zR_z*%55Tc&Ha14ax4m&pK*a5rB!ifkeL$3)_hEQ}Vthh{!pAw!U~7;(zg%u-YLaa&GLT>(jsgM&jskqKn}w;`2= z4bEc)suI*iHMa9gDLg%slasx@%`*j83TD*6p$>0Q4{XMY;OZ{ zpfQH;0zdixUBBN}=!od}fky%3E(~=mN&oXL>jQ$me;?o{{*_k#yX^nxwtuR4_(&Nd 
z*br-U-})ahDb3#z5i$Gazp9LmsM>PZG>K^pe|qiRTLgy~9OGuq)yRny zzHn#wp8GH9aBe!oRsl)TE0qYbYiho%Fjyf%jCe-*(Mn-{1bSaVYvAK7qRQiy0r&Zf; zsavJ^fiZ4LwG>gB@WIW&XAO3+5QFo-IU13(5?l~u+}}t}92Gbo_}MRl`6L1e=;9G75p^6o#(4fbj`ho%ff5t*b<<-l_kcq_ycv@c& zj1QVtWt5Z;2kW22cgrq3R7~952R*S2K%NCnVhA%s+`lSABSbNhSWxY=Fag>GICwNz z{vh6x5Dldd9U7gUI?pgxU?a7u9S{iC2@S;`4`^JnRdWHs@5EDn_W$_tnQ@-sW#c(# zQU-NNlq$MR-_g^iPhm%HS_AjSGGGD2^#!h?AK-K_RGhJp_QNPB)Hx!84*5Yu2p(vh zx?%`yotxvELT9WKM7uiQ=(bzB6c#Cwi(@o5Gb6@c1S&m2_|KxEMWRc#2SG6+?iy}~ ze-7>mp;l1u`31)}?uzJq_{6Cgt@XA*>y%-ytAjU7Xl7vG#qM>P?ugyN3MYAN#+Mh2TH={*uw!W|B)^vA?SR#JyjJ0I_SPGixU?YP$~9? zt&_{l6*p_JXvlaRn9s~L8TMo?Jgu<+qwe}>qdb=3qM<%TUoWLUb3SSH7@jyVGt;lA7^*62 zF?g&=B-??lQl67djOo~OwLaFjS~E{Q8Y=fQ*}iGii++u(h(Vp`=#*Z-XRJs_Oo0XNE%!E=gIl-{?us zxnnKrE!;X@-OkY*_B%*Dtf`5pLxv!5M{|%n$ScsEkn=~PUNBc0H-k!Ugg7xP!;k5D zr;TVD>BG2p0jLlW53RG!iuEGxn%Ak((Rafvq$7`UJ)pTyM@L51f46K`%07dFmIkbr zt}dIwjMC;C(UkXGZ)C#1dli+R5^)pry<2wQni3}-6}@T1A1pBxDof>mFf1-?t>>jA z2Vaole6FAX%J1oL5`RXJ0hl96b@iJH*!1EyMtd%P_}XX?LQdq1`%Tc)CmrpX<5P&` zXjV$}{Vxs+nZqj|U<8ina2N`e??23tQBr@ar|}$G7$;2I4up{`t5|Za6dt?5e$p5C zgntChUXJucZjcQQ&XU(fYwkRZmX^nMdOWsiM!&i^;Mb?!!!;TAD2vw^ z7%bO~#c9*ZV(6v|tAjKE$WUY;;b;`Ra#({|%y@j3wr{Bz`KnX(MMR_|k%>#>z#b}g~pXn?yvS5#e{TO0KaQ8!*o0s`*uVWwa?wlQ5 z=_Hlf*V!lF*1DF93BZ@rxhZC6XZ^^i4GF(%a}K5*67@!f98QS!XY4!(Y;pWl2|98Z z=;*e7l`pji;{fCaI*)*Q=HmXTSBS?3SS^|=&v{ZZ)1T}4>Js&8Q&JJ#5t`$AC+js1 z51MudQRiaqrA`|yt-MXgj|%w8250j(-}Vwl)9;rfX(+)7{H3G<6W89}URhZg78Vvt zz!8&!0)-FKUK8_@>i!c0-ROrY5i6Y8cRpPG$|H=e55vBC?*k~-J>+zrI=?4nUrG z63u64B62+&df6u%dwEtW7jH$thML#J^|wK(eDH)6&`Lbk7J`r>ERWxVlr~%PM8(kc9!Wp$(uV08* ztipi#JjO?Yzjt*%Fef!L%JK^j4t^SclWN;KI-DWJ-G=AZ|{M@UDWM#K_;lv?<^T<=HEk5YSWm zCriRICQ(`HE}Xv~1WZmYE?O5w^Vg?F<-%3l+;I{&*JFiZzot_aGv_{}5STty(3X)` zRMhl7alJZsqC{`rd*~032JIZi?!C{2eth6zWxwygR^)ZJe4^iC<>eKnbA+eyt6AqF`c|+}Lb7Frav^GfxsYn06^zcPdJ>TSs|Xlw0TUtY}+REI0HOC88b(3FSt8w{l%raWg9UFJ9!xNhwBYK(I zQ)JF|Ee56bC?4Z0lzBTp&5`;u*Thw&kjj@)SeSJw_lP(F2UcI;r}^_(P%bX5boBG{ z@3_j|>oaveY;g7F?+52cRKhp_@X?C_|NM3{g^0^Uu zH}*x#*SEACg_tpt6l*(8M@eQQ`wNZYiW4r2KsfCtzNzK?a;qBKo6+g%z`(}|fxT_s zwD@2}90Mz<8*0&RWM>>!&-XIh@%psB?wZ_*DjgQ8iq~z|Q*htHYbkCId{y3=7|a7# z|HK>pEmWe7@rKSU_)ss5?Bc{F=NL-}zwZ%5RpU{l)=$I(XVrU~1qb@nixNhrs;rsb z!+Sq~qMIP%<_Qwmc7@}%0!P0z4tAQ__rW^VegTQ>b&`#}y{yD+bU7K#_FC8*U%G8Q zk78IdUla%HxXZa6t>1x9hQZd%oJYl)D#u6VMv3pEoRSNU(o9dRfvK9SB4fn)kilnm zdgaf6=AGr~wozPcXIQU6J#NxT6P33ld->AAj;J@thY4ySyPX?s#-ekYMyV(eE+yYn zQ0$TuqhobO(z4%YASeyTHBp47ZH||;X#57--J`&9#a1F6$qX)bVd#zs5^ow`Lb>N*dlJz`>CuP*pNv zWq@WlJW0gqJr)d!_uPNu;~hHe+dFCV3-d)Pi&ka(4X8-!rS*ml3<3vTp-uN6obTtq zEVn{_T*d;0RK|t%y7M#Z2DACl=y)f~uNI_%Mb(Je`n6YBR_1?sDERch#!uTWynEks zQcd(=g;(n*9~~V60s)8zO0>Xv*LGW?AFs}>t*2Z|^(XKVtln!oAW)->n9^u-P-VHn zd!0SW#Q^pH`jHCa^2Ei@fq}(e?;r(tUOuSb8H+|kcdf0ex^jAGhCI%sbRJ@k7YPU|A%I@e^AXxhAh;pW!E}PWC%@;D2M& z7yRabDc zmKuB&hUEcxD@A2`svqQj`iPvRcYk5Q-s74!{whOUqDbS@K`Ow6ymQaJG+R&&S(oe# zV_4P^CHPvRM9wfp>8XQx8!R`p#(Kl;>fAWBcdqVtda0*WIi2kXo~C~%hj?Crm3f27 z7jDZ~-sjjIOZFuzeeD>%NTFG3O8Va8)a#&UJu!tu1o#S_*z?j+kx~DAr=jcUded{nXKC<Q9*`1 z;|ZofRK9NJ;(F}V;nROga%iw=@`}^28<_ccjpJfs^cXdDHZ<>$a0fgZ_*kkxgbalX z!aBFfg_zAolghdj3BdJH%EB`R5i*@;l?eFCb0C6$3K(#ZGT=SCB58xTQp;1 z%@`ynCpY!CDgx&!e0gSRtXo?iEf<8#oy+jEp7}#Ej8qM=$j6%Ap99#?fFms1oo&{M@b4Hkt3Rlkq{4zfo!zsJ zpDsa%OM)^2iF<$Wlf^BuK*jSe?S=*1VqJgEt-wG(EBkh5s2nFqF*Ku`9{u(fJVgZM zaT|RwqY`QmN(N$|??P0ll7VXdmk$9<7`TXteE;-Cf$#ri!Cprp-(Ce4@Xu`vFK%@c zalPl9_wV~>hw*$7eHLx&32WR#uq|Ma%IA9DbDqfbovg1qFCi$hsJuiOeV6@r7kAsz zW_t1YdD+ZtRP$vI)`~_doipAhR#==JvS7HP1*D5KR{E=yX3J+UPG%%lRhLR>`ZvRW z{~iL|xv|d`Z)9}d!0y#5)unhjGw!|5Uq*2e?_7B=ymI~MeyaDBbx&zqA;EU3sQv51 
zR%@N(fkN%1sd@~M@Uc5cbhR^Z7J4o6edl#YG=*MuqC>C^?QpIle8lpAT zXfu*f6lwHk50bd;L=2?rLIqie1EZz<{TUR6Ov{ttiq`gz`?{yIqIk=n=rCF9`a_De zRhs%oGkSZFT7K{YZGU59!}8s`^_7)sd$=4VTiE@uyR$Q}i-DGQNY$}8Gjkps2GF?# zXB%2uTZ>CdKtl>>IUdx0g(W-;reyImZEbto#>{LiUpbS2!|2o5r$$0xDKt`H?;Ra! z6GQL$HIuOh@*FX@cXS8{36T&Jqpn;YuBq$mPlFzdy*8%Kxw#AzC<*5@ETk$!$G_3- zz`Q$(YV=Fo;N{uDW4ymCq@YaEFlTqOnT#VWVY#A4)A@*8Y#`t0>SI$E(5@kb*yGiM z;sQp*3++86o14oel>(;Lf&ol{6pLC}CK`)Uf&>dtvl>y*Poe91+PxRQaq3fd@i4sz zsE)2SPJa4;W@n&xA`%UZRU?ACn(!}mNJ z98bZ*!kTo40M!hzOy<~QRKBC8D z0cIkv5f!gX1FfsAZSU%uZ*bw5b}`^NbU9NOzFuexV!hml_W|jwfZ_r~mVIAZrRA+H zucLL%ZL#Qy7&cgMJd4X3j1HfEwX>~F;@VR1hYL0WJX9NDR@umA3G6482CV>S70 z6LSq^*`q^_W!&8a{lx?3RrP{fNlgzcZBOR=XtwpmeF?D=BEo*-0jB17E*`F!;L5lI zm*P!QsSftsW?U`NH{Rz;6wT+k{cJiJQk|Ww8MQI??CMkcYV^sD_sV1XifiSn*UiQs zf&w;^-lYW`cgB#EztXXCxf_i#(c6GWG z>DTpTR6Zh+UI)7zCF?#oeAV9yoR?Jtk#{Le{!o_KZ@K3XuZ=`};4cfJwHnenfk`9CM+4O)`sQ`zb9;^<6rX}4gNoKrc zVK=TD!I#^=udz`%Dj2=1}IcJH%xLL5PCiW zM@}8`LYz{oSigI9pGaVARoEh8uu3hu*jY4P1_Gt?!E;b1hGoLt|D}Ot{H& zqxsy<1nslJLYb`6dsS8d0fNo9>j1=OW?3MaIeg+Nk73D=E~u459_#DZ6XN3=hrwbZ zy_Tt?Vq54}=xAwoPnVb9?4vP)7HSu8##lgl3Y zfsvb3D2Iwpb0i<-xX^vdUwox_T&tUm(p-DaaDQ8f|GDpC8VwB% z^%<<-BiI65lYUsxsR*|(mpkMFXYYfQeiFN9U3@X6$ePp?_aPsVdqa23rrdvE6G z;4Ax{U#+XZfi%FVRH=lomSba!s!F8x%uzbQUmoz;Uw+i~h`_QQ9Pb%S6Bq#$cW>_( z(5Jv*)QI^}MNdypT>MTXI5j$5YezG<432ibe*JoUd>kD1zche?6Z0>nr0VPI!S~p8 zzJ6Fuh>H5XvI1hZ+?he``{&3`CXX~iQr@aO_NTeUm?__>OFVP-RTHzj8HY?fALA?E z#TUC;bSy(L624-H7bB`Sd)q(w)R<(Hl+Q}%=K2ab9s29nFM7q48E}FV9M1+QAo9~E zOi2JX`uaA&Umd8d10A-g>S%*s00{*ewLCyO3^>)3=UJ*-VUCP~0uI;4a#0A_(+;xn z@bGYR$LB%DYJAJ@j; zpV&*kNxVVeE$b|XA?aNvQK_TxR!!(m)*67Y*f67JyU?ru*ZFHjSN zLGf)DG}p<;F=j>FKC6nCv*q_g0KqY%w8Zm+2wB2FxB6#NoCD6zq3$S}a6Y%jR>_uD zT$PAb8Its;Q$<;c$+<)x18=RYSU>UH3*4WUp`oF93Nk4h*x^5-4y3$9~N=l zCx5|k-HjB%6S7lXISpE~W6|D;c^rrNw+s!b)U7dV6htK_GdaBI0)Qw4kA)9_qj*;g zjL83$_g44!_puKVX+}=<>1`hYu<+k{T}cs2@o9lx{8rnub#L0LpE!K^gNu#V8offc zJn%i&jsMt2v{WF}R!;|GsG8Ud;T z0NbGJsiUI-B)NUT4;mcN8O! 
z?75tr-0Un2=mkWmNBr`dGQ7d8{cD_w6BB*2>+KOz#B$hwjCxby&LrhT*z=0B>F-l` z7S^bY)548-E49_uJ}j$U`=I}SZCrgc(`y_@ToH<^$w-`T6M4-R(n*NC#Z_@x#ClPa zS@Lo%LNe}^9h0`130IrA-HKgwMM>qQdkt+#c^M(ysi92f{XV~~Ii1t}Yv(-Y`F;0$ zp6&U3Ki}v3`F?d<;_`0mB}MO1JqB+;CScFtvWLigLt$}{QKMNf)$T%w@Fn=>L)vk9 zr18;p-IA<5ghU(x>8gdHvpZT#A{-3!X0OP3ZL+Z)hs@P|OEuaKey zALIihu_{^*{BqBGwPC*nGs;yc%ujSa3tyGG4v)$6y|De_Z7?KCOuoehU zayj+3zL?*`-VhrtB2g8Tnp-t=R?t9H+`GY*_d2A^NQ2?v?jTHQWqj2H`>%DKOrfZj z^}&-KMDLyqb^PpSb|rM`oYM^_QoE4j)kgsY_mfCPMI|aK3N&I#PEOXb2o59FN(&CM zm?3~poy-NA(_;IWvikaZ3dJ8Nm5((f$gi#}&$$;IUIqy}hJHf9Wpt>T50-;JMBD3o zr+enB1BS@^VEBnJzH!z1CT#BgCo$nnMXiPMSx@J zy9Ev}pwm5)vZN6fX-pD|6;UX0AaYG!ny+g6A83~2$~>gUMX#-zcR+E#-@}_5^byzd zpFXy^%lV-=n`r(|V0?ZbW}YZ8rF+}$zC<+Er-vPVNhRX?aS4l{6oasbl>$W8XUl=0 ziMrh{jTjOfJUlWoIZ%JI|08kI!(+FiUPYGJzc4nzl(fYYuYMH7f z&6^De9a!GJg&HZqJRTjk_!Y1_a)uL>ttctIf>b%YOx-11M8+ge6K2Q!Uh#QFiIL-O zYxT)HQ?;hO5e5t~jTV1|N<;jDECXCIQW7iV;sVNlG_tYzp64Rl9&9R-9bweHm$YzA z0jRhk_`78K+3$ncW#nSt0$UNa`-P#W-{=9R{o-Zhq#^#ZIj6qLb|w5}Y%DM3Aqypn z*i}YHa_`3q&I`CK0E-*Ag@uKIqf(tVh$_540>?GkUuW~>?F|jh6PfD0$hNuy8^*_1 zm?&js%gp15=0a2ZnZd5&ZU4^s`<@@tMyLXLzso%}|LY&mU}0UhshyG8giXA2`<`&) zScs#;sJ`4ss2Ia4qujz|6W+42_UO zdQ0@9nQov?t*e<9gl6+}uv8NyH~*{W`O#Qm~IAjTE01~rIC2bGa) z(*QSyx^n?A1|JGsXvBRzxdm39Zn(5@-bzs(yGa9@Q}USj)s$}^0)_so)}&^>9!X=) atf*$m&n)LA=7J`5avrWe`|mi1rTquRpW+e# literal 0 HcmV?d00001 diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.plantuml b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.plantuml new file mode 100644 index 0000000000..bf848760e5 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.plantuml @@ -0,0 +1,44 @@ +@startuml +title Figure 8: User creates a machine with kubeadm bootstrapper + +' -- GROUPS START --- + +box #lightgreen +participant "API Server" +end box + +box #lightblue +participant "Cluster API Machine Controller" +end box + +box #lightgreen +participant "Workload Cluster API Server" +end box + +' -- GROUPS END --- + + +"API Server"-->>"Cluster API Machine Controller": AWSInfrastructureConfig updated + +"Cluster API Machine Controller"-> "Cluster API Machine Controller":Machine Controller Reconcile +activate "Cluster API Machine Controller" + +note over "Cluster API Machine Controller": - ✅ Machine.Status.Phase is "Provisioned" \n- ✅ Machine.Spec.InfrastructureRef -> Status.Ready is true\n- ✅ Machine.Spec.InfrastructureRef -> Status.Addresses is not empty + +loop until timeout or Node's status is Ready +"Cluster API Machine Controller"->"Workload Cluster API Server": Get Node +"Cluster API Machine Controller"<<--"Workload Cluster API Server": Response +note over "Cluster API Machine Controller":Loop conditions:\n- ✅ Node.Spec.ProviderID matches Machine.Spec.ProviderID\n- ✅ Node status is "Ready" +end + +"Cluster API Machine Controller"->"Cluster API Machine Controller": Set Machine.Status.NodeRef + +"Cluster API Machine Controller"->"Cluster API Machine Controller": Set Machine.Status.Phase to "Ready" + +"Cluster API Machine Controller"->"API Server": Update Machine +"Cluster API Machine Controller"<--"API Server": Response + +deactivate "Cluster API Machine Controller" + +hide footbox +@enduml diff --git a/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.png b/vendor/sigs.k8s.io/cluster-api/docs/proposals/images/machine-states-preboot/Figure8.png new file mode 100644 index 
index 0000000000000000000000000000000000000000..d63636206a8cc76fef5b1623aff4aca1f78182f4
GIT binary patch
literal 32285
[base85-encoded binary data for Figure8.png omitted]
+
+
+
+- [Generating figures](#generating-figures)
+
+
+
+Most of the figures for this proposal are generated with [PlantUML](http://plantuml.com/), an [open-source](https://sourceforge.net/projects/plantuml/) tool that can generate sequence, use case, class, activity, state, object, and other kinds of UML diagrams.
+
+PlantUML requires the Java runtime, so we have published a Docker container image that includes all dependencies (to publish your own, use `Dockerfile` in this directory).
+
+## Generating figures
+
+To generate the diagrams in this directory, run `make figures`.
+
+In general, to generate the figure described in `foo.plantuml`:
+```
+SRC="foo.plantuml"
+docker run \
+ --rm \
+ --volume ${PWD}:/figures \
+ --user $(id --user):$(id --group) \
+ dpf9/plantuml:1.2019.6 \
+ -v /figures/${SRC}
+```
\ No newline at end of file
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/scope-and-objectives.md b/vendor/sigs.k8s.io/cluster-api/docs/scope-and-objectives.md
index adc7e6177e..32bef883af 100644
--- a/vendor/sigs.k8s.io/cluster-api/docs/scope-and-objectives.md
+++ b/vendor/sigs.k8s.io/cluster-api/docs/scope-and-objectives.md
@@ -1,5 +1,26 @@
---
title: Cluster API Scope and Objectives
+
+
+
+
+- [Cluster API Scope and Objectives](#cluster-api-scope-and-objectives)
+ - [Table of Contents](#table-of-contents)
+ - [Summary](#summary)
+ - [What is Cluster API?](#what-is-cluster-api)
+ - [Glossary](#glossary)
+ - [Motivation](#motivation)
+ - [Goals](#goals)
+ - [Non-goals](#non-goals)
+ - [Requirements](#requirements)
+ - [Foundation](#foundation)
+ - [User Experience](#user-experience)
+ - [Organization](#organization)
+ - [Validation](#validation)
+ - [Extension](#extension)
+
+
+
authors:
- "@vincepri"
- "@ncdc"
@@ -40,6 +61,8 @@ We are building a set of Kubernetes cluster management APIs to enable common clu
## Glossary
+[See ./book/GLOSSARY.md](./book/GLOSSARY.md)
+
- __Cluster API__: Unless otherwise specified, this refers to the project as a whole.
- __Infrastructure provider__: Refers to the source of computational resources (e.g. machines, networking, etc.).
Examples for cloud include AWS, Azure, Google, etc.; for bare metal include VMware, MAAS, etc.
When there is more than one way to obtain resources from the same infrastructure provider (e.g. EC2 vs. EKS) each way is referred to as a variant.
- __Provider implementation__: Existing Cluster API implementations consist of generic and infrastructure provider-specific logic. The infrastructure provider-specific logic is currently maintained in infrastructure provider repositories.
diff --git a/vendor/sigs.k8s.io/cluster-api/docs/staging-use-cases.md b/vendor/sigs.k8s.io/cluster-api/docs/staging-use-cases.md
index 577c58fbaa..3474545f3d 100644
--- a/vendor/sigs.k8s.io/cluster-api/docs/staging-use-cases.md
+++ b/vendor/sigs.k8s.io/cluster-api/docs/staging-use-cases.md
@@ -6,7 +6,7 @@ last-updated: 2019-04-16
-**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
+
- [Cluster API Reference Use Cases](#cluster-api-reference-use-cases)
- [Role Glossary](#role-glossary)
@@ -33,6 +33,7 @@ last-updated: 2019-04-16
- [Managing Providers](#managing-providers)
- [Creating Workload Clusters](#creating-workload-clusters)
- [Provider Implementors](#provider-implementors)
+ - [Cluster Health Checking](#cluster-health-checking)
@@ -227,3 +228,22 @@ Multitenancy Management
### Provider Implementors
- As a provider, I want the machine controller to reconcile a Machine in response to an event from some other resource in the cluster. This is the sort of thing that other controllers do on a regular basis, so that's nothing particularly interesting. But having made a machine actuator, there's not an easy way to get access to the machine controller object in order to call its Watch method.
+
+## Cluster Health Checking
+
+Cluster Health Checking is a service that provides the health status of a Kubernetes cluster and its components.
+
+- As an operator, given I have created a Kubernetes-conformant cluster with ClusterAPI, I want to check the Kubernetes cluster node status.
+ - Describe nodes and provide details on whether or not they are ready/healthy.
+ - List conditions for any nodes which are `NotReady` and list information about their allocated resources.
+
+- As an operator, given I have created a Kubernetes-conformant cluster with ClusterAPI, I want to check the kube-apiserver status.
+
+- As an operator, given I have created a Kubernetes-conformant cluster with ClusterAPI, I want to check the etcd status.
+
+- 🔭 As an operator, given I have created a Kubernetes-conformant cluster with ClusterAPI, I want to check the status of other Kubernetes components, such as the ingress controller and other add-on components.
+
+- 🔭 As an operator, given I have created a Kubernetes-conformant cluster with ClusterAPI, I want to check the statuses of unhealthy Pods in a configured namespace.
+ - Provide details on any pods which are unhealthy in the `kube-system` namespace. Filter the unhealthy pods by their status (`kubectl get pods --show-labels -n kube-system | grep -vE "Running|Completed"`).
+ - Describe any Pods which are not `Completed|Running` and list their Events to provide hints on the failure.
+ - Look for Pods which don't have all of their containers running.
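The health-checking stories above reduce to a handful of read-only queries against the workload cluster. As a minimal sketch, assuming `kubectl` is already configured against the workload cluster, an operator could run checks along these lines (the commands below are illustrative only and are not part of this patch or of any eventual health-checking service):

```
# Illustrative operator checks; assumes kubectl talks to the workload cluster.

# Node status: list nodes, then describe any node that is not Ready.
kubectl get nodes
kubectl get nodes --no-headers | awk '$2 != "Ready" {print $1}' | \
  while read -r node; do kubectl describe node "$node"; done

# kube-apiserver health endpoint.
kubectl get --raw='/healthz'

# etcd and other control-plane components, as reported by componentstatuses.
kubectl get componentstatuses

# Pods in kube-system that are not Running or Completed.
kubectl get pods --show-labels -n kube-system | grep -vE "Running|Completed"
```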
diff --git a/vendor/sigs.k8s.io/cluster-api/go.mod b/vendor/sigs.k8s.io/cluster-api/go.mod index c2ff9acd5a..e1be0beb9b 100644 --- a/vendor/sigs.k8s.io/cluster-api/go.mod +++ b/vendor/sigs.k8s.io/cluster-api/go.mod @@ -3,67 +3,26 @@ module sigs.k8s.io/cluster-api go 1.12 require ( - cloud.google.com/go v0.35.1 // indirect - contrib.go.opencensus.io/exporter/ocagent v0.2.0 // indirect - github.com/Azure/go-autorest v11.5.0+incompatible // indirect - github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 // indirect - github.com/census-instrumentation/opencensus-proto v0.1.0 // indirect + github.com/Azure/go-autorest/autorest v0.2.0 // indirect github.com/davecgh/go-spew v1.1.1 - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect - github.com/evanphx/json-patch v4.2.0+incompatible // indirect - github.com/go-logr/logr v0.1.0 // indirect - github.com/go-logr/zapr v0.1.0 // indirect - github.com/gobuffalo/envy v1.6.12 // indirect - github.com/gogo/protobuf v1.2.0 // indirect - github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff // indirect - github.com/google/btree v1.0.0 // indirect - github.com/google/go-cmp v0.3.0 // indirect - github.com/google/gofuzz v1.0.0 // indirect - github.com/google/uuid v1.1.1 // indirect - github.com/googleapis/gnostic v0.2.0 // indirect - github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2 // indirect - github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f // indirect - github.com/hashicorp/golang-lru v0.5.0 // indirect - github.com/imdario/mergo v0.3.7 // indirect - github.com/json-iterator/go v1.1.6 // indirect - github.com/markbates/deplist v1.0.5 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/go-logr/logr v0.1.0 + github.com/gophercloud/gophercloud v0.2.0 // indirect github.com/onsi/ginkgo v1.8.0 github.com/onsi/gomega v1.5.0 - github.com/pborman/uuid v1.2.0 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.8.1 - github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f // indirect - github.com/prometheus/common v0.1.0 // indirect - github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 // indirect github.com/sergi/go-diff v1.0.0 github.com/spf13/cobra v0.0.3 github.com/spf13/pflag v1.0.3 - go.uber.org/atomic v1.3.2 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.9.1 // indirect golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 - golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c // indirect - golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect - golang.org/x/text v0.3.2 // indirect - golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b // indirect - google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 // indirect - google.golang.org/grpc v1.18.0 // indirect - k8s.io/api v0.0.0-20190222213804-5cb15d344471 - k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 // indirect - k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76 - k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa - k8s.io/client-go v10.0.0+incompatible - k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b - k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d - k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af // indirect - k8s.io/klog v0.3.2 - k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 // indirect - sigs.k8s.io/controller-runtime v0.1.12 - sigs.k8s.io/controller-tools 
v0.1.11 - sigs.k8s.io/testing_frameworks v0.1.1 - sigs.k8s.io/yaml v1.1.0 // indirect + k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b + k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 + k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d + k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c + k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible + k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7 + k8s.io/klog v0.3.1 + k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 + sigs.k8s.io/controller-runtime v0.2.0-beta.5 + sigs.k8s.io/controller-tools v0.2.0-beta.5 + sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 ) - -replace k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 diff --git a/vendor/sigs.k8s.io/cluster-api/go.sum b/vendor/sigs.k8s.io/cluster-api/go.sum index 40bf86c96f..3cc3b3a784 100644 --- a/vendor/sigs.k8s.io/cluster-api/go.sum +++ b/vendor/sigs.k8s.io/cluster-api/go.sum @@ -1,646 +1,285 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.35.1 h1:LMe/Btq0Eijsc97JyBwMc0KMXOe0orqAMdg7/EkywN8= -cloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg= -contrib.go.opencensus.io/exporter/ocagent v0.2.0 h1:Q/jXnVbliDYozuWJni9452xsSUuo+y8yrioxRgofBhE= -contrib.go.opencensus.io/exporter/ocagent v0.2.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/go-autorest v11.5.0+incompatible h1:zp9GQJhEX+EBqEYC2MEGQ+gjKFEPRAWtfwcmstS2hGk= -github.com/Azure/go-autorest v11.5.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest v0.2.0 h1:zBtSTOQTtjzHVRe+mhkiHvHwRTKHhjBEyo1m6DfI3So= +github.com/Azure/go-autorest/autorest v0.2.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest/adal v0.1.0 h1:RSw/7EAullliqwkZvgIGDYZWQm1PGKXI8c4aY/87yuU= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/census-instrumentation/opencensus-proto v0.0.2-0.20180913191712-f303ae3f8d6a/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.1.0 h1:VwZ9smxzX8u14/125wHIX7ARV+YhR+L4JADswwxWK0Y= -github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= -github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= -github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= -github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= -github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= 
-github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= -github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= -github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= -github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= -github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= -github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= -github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= -github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= -github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= -github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= -github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= -github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= -github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= -github.com/gobuffalo/envy v1.6.12 h1:zkhss8DXz/pty2HAyA8BnvWMTYxo4gjd4+WCnYovoxY= -github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2Kk8PTP0= -github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= -github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= -github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= -github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= -github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= -github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= -github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= -github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= -github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= -github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= -github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod 
h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= -github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= -github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= -github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= -github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= -github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= -github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= -github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= -github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= -github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= -github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= -github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= -github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= -github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= -github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= -github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= -github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= -github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= -github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= -github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= -github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= -github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= -github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= -github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= -github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod 
h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= -github.com/gobuffalo/httptest v1.0.2/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= -github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= -github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= -github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= -github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= -github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= -github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= -github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= -github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= -github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= -github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= -github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= -github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= -github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= -github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= -github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= -github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= -github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= -github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= -github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= -github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= -github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= -github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= -github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= -github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= -github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= -github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd 
v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= -github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= -github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= -github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= -github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= -github.com/gobuffalo/packr v1.15.0/go.mod h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= -github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= -github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= -github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= -github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= -github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= -github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= -github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= -github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= -github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= -github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= -github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= -github.com/gobuffalo/plush v3.7.16+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.20+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.21+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.22+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.23+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.30+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.31+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plush v3.7.32+incompatible/go.mod h1:rQ4zdtUUyZNqULlc6bqd5scsPfLKfT0+TGMChgduDvI= -github.com/gobuffalo/plushgen v0.0.0-20181128164830-d29dcb966cb2/go.mod h1:r9QwptTFnuvSaSRjpSp4S2/4e2D3tJhARYbvEBcKSb4= -github.com/gobuffalo/plushgen v0.0.0-20181203163832-9fc4964505c2/go.mod h1:opEdT33AA2HdrIwK1aibqnTJDVVKXC02Bar/GT1YRVs= -github.com/gobuffalo/plushgen v0.0.0-20181207152837-eedb135bd51b/go.mod h1:Lcw7HQbEVm09sAQrCLzIxuhFbB3nAgp4c55E+UlynR0= -github.com/gobuffalo/plushgen v0.0.0-20190104222512-177cd2b872b3/go.mod 
h1:tYxCozi8X62bpZyKXYHw1ncx2ZtT2nFvG42kuLwYjoc= -github.com/gobuffalo/pop v4.8.2+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.3+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/pop v4.8.4+incompatible/go.mod h1:DwBz3SD5SsHpTZiTubcsFWcVDpJWGsxjVjMPnkiThWg= -github.com/gobuffalo/release v1.0.35/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.38/go.mod h1:VtHFAKs61vO3wboCec5xr9JPTjYyWYcvaM3lclkc4x4= -github.com/gobuffalo/release v1.0.42/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.52/go.mod h1:RPs7EtafH4oylgetOJpGP0yCZZUiO4vqHfTHJjSdpug= -github.com/gobuffalo/release v1.0.53/go.mod h1:FdF257nd8rqhNaqtDWFGhxdJ/Ig4J7VcS3KL7n/a+aA= -github.com/gobuffalo/release v1.0.54/go.mod h1:Pe5/RxRa/BE8whDpGfRqSI7D1a0evGK1T4JDm339tJc= -github.com/gobuffalo/release v1.0.61/go.mod h1:mfIO38ujUNVDlBziIYqXquYfBF+8FDHUjKZgYC1Hj24= -github.com/gobuffalo/release v1.0.72/go.mod h1:NP5NXgg/IX3M5XmHmWR99D687/3Dt9qZtTK/Lbwc1hU= -github.com/gobuffalo/release v1.1.1/go.mod h1:Sluak1Xd6kcp6snkluR1jeXAogdJZpFFRzTYRs/2uwg= -github.com/gobuffalo/release v1.1.3/go.mod h1:CuXc5/m+4zuq8idoDt1l4va0AXAn/OSs08uHOfMVr8E= -github.com/gobuffalo/release v1.1.6/go.mod h1:18naWa3kBsqO0cItXZNJuefCKOENpbbUIqRL1g+p6z0= -github.com/gobuffalo/shoulders v1.0.1/go.mod h1:V33CcVmaQ4gRUmHKwq1fiTXuf8Gp/qjQBUL5tHPmvbA= -github.com/gobuffalo/syncx v0.0.0-20181120191700-98333ab04150/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/syncx v0.0.0-20181120194010-558ac7de985f/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobuffalo/tags v2.0.11+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags v2.0.14+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/tags v2.0.15+incompatible/go.mod h1:9XmhOkyaB7UzvuY4UoZO4s67q8/xRMVJEaakauVQYeY= -github.com/gobuffalo/uuid v2.0.3+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.4+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/uuid v2.0.5+incompatible/go.mod h1:ErhIzkRhm0FtRuiE/PeORqcw4cVi1RtSpnwYrxuvkfE= -github.com/gobuffalo/validate v2.0.3+incompatible/go.mod h1:N+EtDe0J8252BgfzQUChBgfd6L93m9weay53EWFVsMM= -github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= -github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= -github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2 h1:UVNu/91ffTOzayxrORCp4UVGrYH05xTq8DuNR7ILW1Q= 
-github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic= +github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= -github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.1.2/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jmoiron/sqlx v0.0.0-20180614180643-0dae4fefe7c0/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= -github.com/joho/godotenv v1.2.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/markbates/deplist v1.0.4/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/deplist v1.0.5/go.mod h1:gRRbPbbuA8TmMiRvaOzUlRfzfjeCCBqX2A6arxN01MM= -github.com/markbates/going v1.0.2/go.mod h1:UWCk3zm0UKefHZ7l8BNqi26UyiEMniznk8naLdTcy6c= -github.com/markbates/grift v1.0.4/go.mod h1:wbmtW74veyx+cgfwFhlnnMWqhoz55rnHR47oMXzsyVs= -github.com/markbates/hmax v1.0.0/go.mod h1:cOkR9dktiESxIMu+65oc/r/bdY4bE8zZw3OLhLx0X2c= -github.com/markbates/inflect v1.0.0/go.mod h1:oTeZL2KHA7CUX6X+fovmK9OvIOFuqu0TwdQrZjLTh88= -github.com/markbates/inflect v1.0.1/go.mod h1:uv3UVNBe5qBIfCm8O8Q+DW+S1EopeyINj+Ikhc7rnCk= -github.com/markbates/inflect v1.0.3/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/oncer v0.0.0-20180924031910-e862a676800b/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20180924034138-723ad0170a46/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181014194634-05fccaae8fc4/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/refresh v1.4.10/go.mod h1:NDPHvotuZmTmesXxr95C9bjlw1/0frJwtME2dzcVKhc= -github.com/markbates/safe v1.0.0/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/markbates/sigtx 
v1.0.0/go.mod h1:QF1Hv6Ic6Ca6W+T+DL0Y/ypborFKyvUY9HmuCD4VeTc= -github.com/markbates/willie v1.0.9/go.mod h1:fsrFVWl91+gXpx/6dv715j7i11fYPfZ9ZGfH0DQzY7w= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/monoculum/formam v0.0.0-20180901015400-4e68be1d79ba/go.mod h1:RKgILGEJq24YyJ2ban8EO0RUVSJlF1pGsEvoLEACr/Q= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= -github.com/pborman/uuid 
v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.1.0 h1:IxU7wGikQPAcoOd3/f4Ol7+vIKS1Sgu08tzjktR4nJE= -github.com/prometheus/common v0.1.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rogpeppe/go-internal v1.0.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20170515013102-78fb10f4a5f8/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20180602230221-c42b0e3b24d9/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions 
v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.1.0/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.0 h1:O9FblXGxoTc51M+cqr74Bm2Tmt4PvkA5iu/j8HrkNuY= -github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.2.1/go.mod h1:P4AexN0a+C9tGAnUFNwDMYYZv3pjFuvmeiMyKRaNVlI= -github.com/spf13/viper v1.3.1/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= 
-go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c h1:pcBdqVcrlT+A3i+tWsOROFONQyey9tisIQHI4xqVGLg= -golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod 
h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8= +golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b h1:QhDb4isMLF+1hiAgAqj1YPRPQh+eAqwfG6wJzp57Iuo= -golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= +golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +gomodules.xyz/jsonpatch/v2 v2.0.0 h1:lHNQverf0+Gm1TbSbVIDWVXOhZ2FpZopxRqpr2uIjs4= +gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 h1:SdCO7As+ChE1iV3IjBleIIWlj8VjZWuYEUF5pjELOOQ= -google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= 
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= -google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= -k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 h1:JfFtjaElBIgYKCWEtYQkcNrTpW+lMO4GJy8NP6SVQmM= -k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 
h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= -k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa h1:lje7gmtp7V1upmFWKah1sprCvmd/9GvlRZsBlRauCVc= -k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= -k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34= -k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b h1:KH0fUlgdFZH8UMxJ/FDCYHpczfSQKefetq5NjL6BVF0= -k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d h1:rnHKkkKa8rR4DkemPeIzuL+/Ks2zZj6d3OV/HYD0t/E= -k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d/go.mod h1:lD66vYoxfIYwZTZunUYSg/e6fEj0fcgked2esAmsIfA= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c h1:k7ALUVzrOEgz4hOF+pr4pePn7TqZ9lB/8Z8ndMSsWSU= +k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7 h1:f+AySqWvoqyCD7aArN3EZ+g2boKIS52pcSo6zdZkc+4= +k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7/go.mod h1:DMaomcf3j3MM2j1FsvlLVVlc7wA2jPytEur3cP9zRxQ= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.2 h1:qvP/U6CcZ6qyi/qSHlJKdlAboCzo3mT0DAm0XAarpz4= -k8s.io/klog v0.3.2/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod 
h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/controller-runtime v0.1.12 h1:ovDq28E64PeY1yR+6H7DthakIC09soiDCrKvfP2tPYo= -sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= -sigs.k8s.io/controller-tools v0.1.11 h1:1ln8Ipmg2xosbeHmnlzG53kwO1f3yf9l6auvCHdbZpI= -sigs.k8s.io/controller-tools v0.1.11/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw= -sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/controller-runtime v0.2.0-beta.5 h1:W2jTb239QEwQ+HejhTCF9GriFPy2zmo1I6pPmJTeEy8= +sigs.k8s.io/controller-runtime v0.2.0-beta.5/go.mod h1:HweyYKQ8fBuzdu2bdaeBJvsFgAi/OqBBnrVGXcqKhME= +sigs.k8s.io/controller-tools v0.2.0-beta.5 h1:imdq9hdihQFOjjtSXvJ3xfTm2FiFAUqGRlRLAo7Evxo= +sigs.k8s.io/controller-tools v0.2.0-beta.5/go.mod h1:8t/X+FVWvk6TaBcsa+UKUBbn7GMtvyBKX30SGl4em6Y= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 h1:GtDhkj3cF4A4IW+A9LScsuxvJqA9DE7G7PGH1f8B07U= +sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/sigs.k8s.io/cluster-api/go.vendor b/vendor/sigs.k8s.io/cluster-api/go.vendor new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel index d306c1084e..642b9dbab4 100644 --- a/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/hack/BUILD.bazel @@ -6,14 +6,6 @@ go_library( importpath = "sigs.k8s.io/cluster-api/hack", visibility = ["//visibility:public"], deps = [ - "//vendor/k8s.io/code-generator/cmd/client-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/conversion-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/deepcopy-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/defaulter-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/informer-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/lister-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/register-gen:go_default_library", - "//vendor/k8s.io/code-generator/cmd/set-gen:go_default_library", "//vendor/sigs.k8s.io/controller-tools/cmd/controller-gen:go_default_library", "//vendor/sigs.k8s.io/testing_frameworks/integration:go_default_library", ], diff --git a/vendor/sigs.k8s.io/cluster-api/hack/boilerplate.go.txt b/vendor/sigs.k8s.io/cluster-api/hack/boilerplate.go.txt new file mode 100644 index 0000000000..b7c650da47 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/hack/boilerplate.go.txt @@ -0,0 +1,16 @@ +/* +Copyright The Kubernetes 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + diff --git a/vendor/sigs.k8s.io/cluster-api/hack/build-gitbook.sh b/vendor/sigs.k8s.io/cluster-api/hack/build-gitbook.sh index 8eee619ddb..17e4be0698 100644 --- a/vendor/sigs.k8s.io/cluster-api/hack/build-gitbook.sh +++ b/vendor/sigs.k8s.io/cluster-api/hack/build-gitbook.sh @@ -17,11 +17,10 @@ set -o errexit set -o nounset set -o pipefail -export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +export KUBE_ROOT -cd $KUBE_ROOT - -pushd docs/book/ +pushd "${KUBE_ROOT}/docs/book/" npm install gitbook-cli -g npm install phantomjs-prebuilt npm ci diff --git a/vendor/sigs.k8s.io/cluster-api/hack/pin-dependency.sh b/vendor/sigs.k8s.io/cluster-api/hack/pin-dependency.sh new file mode 100644 index 0000000000..a7ac52dceb --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/hack/pin-dependency.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
+ +# Usage: +# hack/pin-dependency.sh $MODULE $SHA-OR-TAG +# +# Example: +# hack/pin-dependency.sh github.com/docker/docker 501cb131a7b7 + +# Explicitly opt into go modules, even though we're inside a GOPATH directory +export GO111MODULE=on +# Explicitly clear GOFLAGS, since GOFLAGS=-mod=vendor breaks dependency resolution while rebuilding vendor +export GOFLAGS= +# Detect problematic GOPROXY settings that prevent lookup of dependencies +if [[ "${GOPROXY:-}" == "off" ]]; then + kube::log::error "Cannot run with \$GOPROXY=off" + exit 1 +fi + +kube::golang::verify_go_version +kube::util::require-jq + +dep="${1:-}" +sha="${2:-}" +if [[ -z "${dep}" || -z "${sha}" ]]; then + echo "Usage:" + echo " hack/pin-dependency.sh \$MODULE \$SHA-OR-TAG" + echo "" + echo "Example:" + echo " hack/pin-dependency.sh github.com/docker/docker 501cb131a7b7" + echo "" + exit 1 +fi + +_tmp="${KUBE_ROOT}/_tmp" +cleanup() { + rm -rf "${_tmp}" +} +trap "cleanup" EXIT SIGINT +cleanup +mkdir -p "${_tmp}" + +# Add the require directive +echo "Running: go get ${dep}@${sha}" +go get -m -d "${dep}@${sha}" + +# Find the resolved version +rev=$(go mod edit -json | jq -r ".Require[] | select(.Path == \"${dep}\") | .Version") + +# No entry in go.mod, we must be using the natural version indirectly +if [[ -z "${rev}" ]]; then + # backup the go.mod file, since go list modifies it + cp go.mod "${_tmp}/go.mod.bak" + # find the revision + rev=$(go list -m -json "${dep}" | jq -r .Version) + # restore the go.mod file + mv "${_tmp}/go.mod.bak" go.mod +fi + +# No entry found +if [[ -z "${rev}" ]]; then + echo "Could not resolve ${sha}" + exit 1 +fi + +echo "Resolved to ${dep}@${rev}" + +# Add the replace directive +echo "Running: go mod edit -replace ${dep}=${dep}@${rev}" +go mod edit -replace "${dep}=${dep}@${rev}" + +echo "" +echo "Run hack/update-vendor.sh to rebuild the vendor directory" diff --git a/vendor/sigs.k8s.io/cluster-api/hack/tools.go b/vendor/sigs.k8s.io/cluster-api/hack/tools.go index c8fb0d3280..ac3764797d 100644 --- a/vendor/sigs.k8s.io/cluster-api/hack/tools.go +++ b/vendor/sigs.k8s.io/cluster-api/hack/tools.go @@ -18,14 +18,6 @@ limitations under the License. package tools import ( - _ "k8s.io/code-generator/cmd/client-gen" - _ "k8s.io/code-generator/cmd/conversion-gen" - _ "k8s.io/code-generator/cmd/deepcopy-gen" - _ "k8s.io/code-generator/cmd/defaulter-gen" - _ "k8s.io/code-generator/cmd/informer-gen" - _ "k8s.io/code-generator/cmd/lister-gen" - _ "k8s.io/code-generator/cmd/register-gen" - _ "k8s.io/code-generator/cmd/set-gen" _ "sigs.k8s.io/controller-tools/cmd/controller-gen" _ "sigs.k8s.io/testing_frameworks/integration" ) diff --git a/vendor/sigs.k8s.io/cluster-api/hack/update-vendor.sh b/vendor/sigs.k8s.io/cluster-api/hack/update-vendor.sh index 544eaa1498..e043693465 100644 --- a/vendor/sigs.k8s.io/cluster-api/hack/update-vendor.sh +++ b/vendor/sigs.k8s.io/cluster-api/hack/update-vendor.sh @@ -19,8 +19,20 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +# shellcheck source=./ensure-go.sh source "${KUBE_ROOT}/hack/ensure-go.sh" +GOPROXY=$(go env GOPROXY) +export GOPROXY="${GOPROXY:-https://proxy.golang.org}" go mod tidy go mod vendor + +# Copy full dependencies if needed. 
+while IFS= read -r dep; do + src="$(go mod download -json "${dep}" | jq -r .Dir)" + dst="${KUBE_ROOT}/vendor/${dep}" + cp -af "${src}/" "${dst}" + chmod -R +w "${dst}" +done < "${KUBE_ROOT}/go.vendor" + go mod verify diff --git a/vendor/sigs.k8s.io/cluster-api/hack/verify-bazel.sh b/vendor/sigs.k8s.io/cluster-api/hack/verify-bazel.sh index 0518bbca71..bc065cda86 100644 --- a/vendor/sigs.k8s.io/cluster-api/hack/verify-bazel.sh +++ b/vendor/sigs.k8s.io/cluster-api/hack/verify-bazel.sh @@ -17,7 +17,7 @@ set -o errexit set -o nounset set -o pipefail -if ! which bazel &>/dev/null; then echo "Bazel not available, skipping validation"; exit; fi +if ! command -v bazel &>/dev/null; then echo "Bazel not available, skipping validation"; exit; fi diff=$(bazel run //:gazelle -- update -mode diff -external vendored) diff --git a/vendor/sigs.k8s.io/cluster-api/hack/verify_clientset.sh b/vendor/sigs.k8s.io/cluster-api/hack/verify-clientset.sh similarity index 81% rename from vendor/sigs.k8s.io/cluster-api/hack/verify_clientset.sh rename to vendor/sigs.k8s.io/cluster-api/hack/verify-clientset.sh index 8554bbac04..0a2a0f82ac 100644 --- a/vendor/sigs.k8s.io/cluster-api/hack/verify_clientset.sh +++ b/vendor/sigs.k8s.io/cluster-api/hack/verify-clientset.sh @@ -18,10 +18,14 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. -DIFFROOT="${SCRIPT_ROOT}/pkg/client" -TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" -_tmp="${SCRIPT_ROOT}/_tmp" +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +# shellcheck source=./ensure-go.sh +source "${REPO_ROOT}/hack/ensure-go.sh" + +DIFFROOT="${REPO_ROOT}/pkg/client" +TMP_DIFFROOT="${REPO_ROOT}/_tmp/pkg" +_tmp="${REPO_ROOT}/_tmp" + GOPATH=$(go env GOPATH) export GOPATH @@ -35,7 +39,7 @@ cleanup mkdir -p "${TMP_DIFFROOT}" cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" -make clientset +make generate-clientset gazelle echo "diffing ${DIFFROOT} against freshly generated codegen" ret=0 diff --git a/vendor/sigs.k8s.io/cluster-api/hack/verify-doctoc.sh b/vendor/sigs.k8s.io/cluster-api/hack/verify-doctoc.sh new file mode 100644 index 0000000000..3488d37359 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/hack/verify-doctoc.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +doctoc_files="README.md \ + CONTRIBUTING.md \ + cmd/clusterctl/README.md \ + docs/how-to-use-clusterctl.md \ + docs/scope-and-objectives.md \ + docs/staging-use-cases.md \ + docs/developer/releasing.md \ + docs/proposals/20181121-machine-api.md \ + docs/proposals/20190610-machine-states-preboot-bootstrapping.md \ + docs/proposals/YYYYMMDD-template.md" + +function check_doctoc(){ + changed_files="" + for file in $doctoc_files + do + res=$(git diff --cached $file) + if [ "$res" ] + then + changed_files="$changed_files\n$file" + fi + done + + if [ $changed_files ];then + echo -e "Please update these files: $changed_files." + echo "Update with doctoc FILENAME." 
+ echo "Re-commit with -n/--no-verify option." + exit -1 + fi + +} + +doctoc $doctoc_files >/dev/null 2>&1 +check=$(git diff) +if [ ! -z "$check" ];then + check_doctoc +fi diff --git a/vendor/sigs.k8s.io/cluster-api/main.go b/vendor/sigs.k8s.io/cluster-api/main.go new file mode 100644 index 0000000000..effcd3aa2d --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/main.go @@ -0,0 +1,100 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package main + +import ( + "flag" + "os" + + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog" + "k8s.io/klog/klogr" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/controllers" + ctrl "sigs.k8s.io/controller-runtime" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + klog.InitFlags(nil) + + _ = clientgoscheme.AddToScheme(scheme) + _ = v1alpha2.AddToScheme(scheme) + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") + flag.Parse() + + ctrl.SetLogger(klogr.New()) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + LeaderElection: enableLeaderElection, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&controllers.ClusterReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Cluster"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Cluster") + os.Exit(1) + } + if err = (&controllers.MachineReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Machine"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Machine") + os.Exit(1) + } + if err = (&controllers.MachineSetReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("MachineSet"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MachineSet") + os.Exit(1) + } + if err = (&controllers.MachineDeploymentReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("MachineDeployment"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MachineDeployment") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/vendor/sigs.k8s.io/cluster-api/netlify.toml b/vendor/sigs.k8s.io/cluster-api/netlify.toml index 9189c74cb9..aabcce4ee0 100644 --- a/vendor/sigs.k8s.io/cluster-api/netlify.toml +++ b/vendor/sigs.k8s.io/cluster-api/netlify.toml @@ -5,20 +5,20 @@ # Standard Netlify redirects [[redirects]] - from = "https://release-0-1--kubernetes-sigs-cluster-api.netlify.com/*" - to = "https://release-0-1.cluster-api.sigs.k8s.io/:splat" + from = "https://kubernetes-sigs-cluster-api.netlify.com/*" + to = "https://cluster-api.sigs.k8s.io/:splat" status = 301 force = true # HTTP-to-HTTPS rules [[redirects]] - from = "http://release-0-1.cluster-api.sigs.k8s.io/*" - to = "https://release-0-1.cluster-api.sigs.k8s.io/:splat" + from = "http://go.cluster-api.sigs.k8s.io/*" + to = "https://go.cluster-api.sigs.k8s.io/:splat" status = 301 force = true [[redirects]] - from = "http://release-0-1--kubernetes-sigs-cluster-api.netlify.com/*" - to = "http://release-0-1.cluster-api.sigs.k8s.io/:splat" + from = "http://kubernetes-sigs-cluster-api.netlify.com/*" + to = "http://cluster-api.sigs.k8s.io/:splat" status = 301 force = true diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/BUILD.bazel index 5fb1263e99..e69de29bb2 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/BUILD.bazel @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "addtoscheme_cluster_v1alpha1.go", - "apis.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/apis", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/apis.go 
b/vendor/sigs.k8s.io/cluster-api/pkg/apis/apis.go deleted file mode 100644 index b8b48e9c8b..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/apis.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Generate deepcopy for apis -//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate/boilerplate.generatego.txt - -// Package apis contains Kubernetes API groups. -package apis - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -// AddToSchemes may be used to add all resources defined in the project to a Scheme -var AddToSchemes runtime.SchemeBuilder - -// AddToScheme adds all Resources to the Scheme -func AddToScheme(s *runtime.Scheme) error { - return AddToSchemes.AddToScheme(s) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel deleted file mode 100644 index b2c8bd8e09..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "consts.go", - "plugins.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster/common", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/plugins.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/plugins.go deleted file mode 100644 index 8c2083d88c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/plugins.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -import ( - "sync" - - "github.com/pkg/errors" - "k8s.io/klog" -) - -var ( - providersMutex sync.Mutex - providers = make(map[string]interface{}) -) - -// RegisterClusterProvisioner registers a ClusterProvisioner by name. This -// is expected to happen during app startup. 
-func RegisterClusterProvisioner(name string, provisioner interface{}) { - providersMutex.Lock() - defer providersMutex.Unlock() - if _, found := providers[name]; found { - klog.Fatalf("Cluster provisioner %q was registered twice", name) - } - klog.V(1).Infof("Registered cluster provisioner %q", name) - providers[name] = provisioner -} - -func ClusterProvisioner(name string) (interface{}, error) { - providersMutex.Lock() - defer providersMutex.Unlock() - provisioner, found := providers[name] - if !found { - return nil, errors.Errorf("unable to find provisioner for %s", name) - } - return provisioner, nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go deleted file mode 100644 index 7a9c647fe2..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ProviderSpec defines the configuration to use during node creation. -type ProviderSpec struct { - - // No more than one of the following may be specified. - - // Value is an inlined, serialized representation of the resource - // configuration. It is recommended that providers maintain their own - // versioned API types that should be serialized/deserialized from this - // field, akin to component config. - // +optional - Value *runtime.RawExtension `json:"value,omitempty"` - - // Source for the provider configuration. Cannot be used if value is - // not empty. - // +optional - ValueFrom *ProviderSpecSource `json:"valueFrom,omitempty"` -} - -// ProviderSpecSource represents a source for the provider-specific -// resource configuration. -type ProviderSpecSource struct { - // The machine class from which the provider config should be sourced. - // +optional - MachineClass *MachineClassRef `json:"machineClass,omitempty"` -} - -// MachineClassRef is a reference to the MachineClass object. Controllers should find the right MachineClass using this reference. -type MachineClassRef struct { - // +optional - *corev1.ObjectReference `json:",inline"` - - // Provider is the name of the cloud-provider which MachineClass is intended for. 
- // +optional - Provider string `json:"provider,omitempty"` -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/BUILD.bazel similarity index 72% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/BUILD.bazel index f8a8882a79..90d609dd0c 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/BUILD.bazel @@ -3,6 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["group.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster", + importpath = "sigs.k8s.io/cluster-api/pkg/apis/deprecated", visibility = ["//visibility:public"], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/group.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/group.go similarity index 97% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/group.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/group.go index 4450b450fb..3d24b72404 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/group.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/group.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package cluster contains cluster API versions -package cluster +package deprecated diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/BUILD.bazel similarity index 89% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/BUILD.bazel index 074fbc10c0..95359c71b6 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/BUILD.bazel @@ -14,10 +14,10 @@ go_library( "register.go", "zz_generated.deepcopy.go", ], - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1", + importpath = "sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/common:go_default_library", + "//pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", @@ -26,7 +26,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/scheme:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/cluster_types.go similarity index 96% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/cluster_types.go index d6c78af5f1..c8bdae0217 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/cluster_types.go @@ -20,7 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" ) const ClusterFinalizer = "cluster.cluster.k8s.io" @@ -31,8 +31,7 @@ const ClusterFinalizer = "cluster.cluster.k8s.io" /// [Cluster] // Cluster is the Schema for the clusters API // +k8s:openapi-gen=true -// +kubebuilder:resource:shortName=cl -// +kubebuilder:subresource:status +// +kubebuilder:resource:path=clusters,shortName=cl type Cluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -99,7 +98,7 @@ type ClusterStatus struct { // state, and will be set to a token value suitable for // programmatic interpretation. // +optional - ErrorReason common.ClusterStatusError `json:"errorReason,omitempty"` + ErrorReason capierrors.ClusterStatusError `json:"errorReason,omitempty"` // If set, indicates that there is a problem reconciling the // state, and will be set to a descriptive error message. diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/cluster_types_test.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types_test.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/cluster_types_test.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/common_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/common_types.go new file mode 100644 index 0000000000..296d5c5309 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/common_types.go @@ -0,0 +1,143 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// ProviderSpec defines the configuration to use during node creation. +type ProviderSpec struct { + + // No more than one of the following may be specified. + + // Value is an inlined, serialized representation of the resource + // configuration. It is recommended that providers maintain their own + // versioned API types that should be serialized/deserialized from this + // field, akin to component config. + // +optional + Value *runtime.RawExtension `json:"value,omitempty"` + + // Source for the provider configuration. Cannot be used if value is + // not empty. + // +optional + ValueFrom *ProviderSpecSource `json:"valueFrom,omitempty"` +} + +// ProviderSpecSource represents a source for the provider-specific +// resource configuration. +type ProviderSpecSource struct { + // The machine class from which the provider config should be sourced. + // +optional + MachineClass *MachineClassRef `json:"machineClass,omitempty"` +} + +// MachineClassRef is a reference to the MachineClass object. 
Controllers should find the right MachineClass using this reference. +type MachineClassRef struct { + // +optional + *corev1.ObjectReference `json:",inline"` + + // Provider is the name of the cloud-provider which MachineClass is intended for. + // +optional + Provider string `json:"provider,omitempty"` +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. This is a copy of customizable fields from metav1.ObjectMeta. +// +// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, +// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases +// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies +// the API and some issues that can impact user experience. +// +// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) +// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, +// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`. +// The investigation showed that `controller-tools@v2` behaves differently than its previous version +// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package. +// +// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` +// had validation properties, including for `creationTimestamp` (metav1.Time). +// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` +// which breaks validation because the field isn't marked as nullable. +// +// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded +// types. When that happens, this hack should be revisited. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within each name must be unique. 
An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/defaults.go similarity index 90% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/defaults.go index f9aeb4b438..cdb6ca58b6 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/defaults.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/defaults.go @@ -19,7 +19,6 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" ) // PopulateDefaultsMachineDeployment fills in default field values @@ -50,11 +49,11 @@ func PopulateDefaultsMachineDeployment(d *MachineDeployment) { } if d.Spec.Strategy.Type == "" { - d.Spec.Strategy.Type = common.RollingUpdateMachineDeploymentStrategyType + d.Spec.Strategy.Type = RollingUpdateMachineDeploymentStrategyType } // Default RollingUpdate strategy only if strategy type is RollingUpdate. 
- if d.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType { + if d.Spec.Strategy.Type == RollingUpdateMachineDeploymentStrategyType { if d.Spec.Strategy.RollingUpdate == nil { d.Spec.Strategy.RollingUpdate = &MachineRollingUpdateDeployment{} } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/doc.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/doc.go similarity index 96% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/doc.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/doc.go index 9bd92e4352..41a32b31ce 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:conversion-gen=sigs.k8s.io/cluster-api/pkg/apis/cluster // +k8s:defaulter-gen=TypeMeta // +groupName=cluster.k8s.io +// +groupGoName=ClusterDeprecated package v1alpha1 diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machine_types.go similarity index 93% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machine_types.go index e6e978d8f8..356150032e 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machine_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" ) const ( @@ -37,11 +37,7 @@ const ( /// [Machine] // Machine is the Schema for the machines API // +k8s:openapi-gen=true -// +kubebuilder:resource:shortName=ma -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID" -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Machine status such as Terminating/Pending/Running/Failed etc" -// +kubebuilder:printcolumn:name="NodeName",type="string",JSONPath=".status.nodeRef.name",description="Node name associated with this machine",priority=1 +// +kubebuilder:resource:path=machines,shortName=ma type Machine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -59,7 +55,7 @@ type MachineSpec struct { // indicate what labels, annotations, name prefix, etc., should be used // when creating the Node. // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta `json:"metadata,omitempty"` // The list of the taints to be applied to the corresponding Node in additive // manner. This list will not overwrite any other taints added to the Node on @@ -151,7 +147,7 @@ type MachineStatus struct { // can be added as events to the Machine object and/or logged in the // controller's output. 
// +optional - ErrorReason *common.MachineStatusError `json:"errorReason,omitempty"` + ErrorReason *capierrors.MachineStatusError `json:"errorReason,omitempty"` // ErrorMessage will be set in the event that there is a terminal problem // reconciling the Machine and will contain a more verbose string suitable diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machine_types_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machine_types_test.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machine_types_test.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machine_types_test.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineclass_types.go similarity index 97% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineclass_types.go index 1bc0028e0a..5625d7093b 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineclass_types.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineclass_types.go @@ -29,8 +29,7 @@ import ( // MachineClass can be used to templatize and re-use provider configuration // across multiple Machines / MachineSets / MachineDeployments. // +k8s:openapi-gen=true -// +resource:path=machineclasses -// +kubebuilder:resource:shortName=mc +// +kubebuilder:resource:path=machineclasses,shortName=mc type MachineClass struct { metav1.TypeMeta `json:",inline"` // +optional diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machinedeployment_types.go similarity index 94% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machinedeployment_types.go index 743bdc2e0e..bfc74fd390 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machinedeployment_types.go @@ -19,7 +19,14 @@ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" +) + +type MachineDeploymentStrategyType string + +const ( + // Replace the old MachineSet by new one using rolling update + // i.e. gradually scale down the old MachineSet and scale up the new one. + RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" ) /// [MachineDeploymentSpec] @@ -77,7 +84,7 @@ type MachineDeploymentStrategy struct { // "RollingUpdate". // Default is RollingUpdate. // +optional - Type common.MachineDeploymentStrategyType `json:"type,omitempty"` + Type MachineDeploymentStrategyType `json:"type,omitempty"` // Rolling update config params. Present only if // MachineDeploymentStrategyType = RollingUpdate. 
@@ -167,8 +174,7 @@ type MachineDeploymentStatus struct { /// [MachineDeployment] // MachineDeployment is the Schema for the machinedeployments API // +k8s:openapi-gen=true -// +kubebuilder:resource:shortName=md -// +kubebuilder:subresource:status +// +kubebuilder:resource:path=machinedeployments,shortName=md // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector type MachineDeployment struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machinedeployment_types_test.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types_test.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machinedeployment_types_test.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineset_types.go similarity index 96% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineset_types.go index 55f8c13fb1..9d71a11a03 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineset_types.go @@ -23,7 +23,7 @@ import ( metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/validation/field" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" ) // +genclient @@ -32,8 +32,7 @@ import ( /// [MachineSet] // MachineSet ensures that a specified number of machines replicas are running at any given time. // +k8s:openapi-gen=true -// +kubebuilder:resource:shortName=ms -// +kubebuilder:subresource:status +// +kubebuilder:resource:path=machinesets,shortName=ms // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector type MachineSet struct { metav1.TypeMeta `json:",inline"` @@ -61,7 +60,7 @@ type MachineSetSpec struct { // DeletePolicy defines the policy used to identify nodes to delete when downscaling. // Defaults to "Random". Valid values are "Random, "Newest", "Oldest" - // +kubebuilder:validation:Enum=Random,Newest,Oldest + // +kubebuilder:validation:Enum=Random;Newest;Oldest DeletePolicy string `json:"deletePolicy,omitempty"` // Selector is a label query over machines that should match the replica count. @@ -108,7 +107,7 @@ type MachineTemplateSpec struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the machine. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status @@ -159,7 +158,7 @@ type MachineSetStatus struct { // can be added as events to the MachineSet object and/or logged in the // controller's output. 
// +optional - ErrorReason *common.MachineSetStatusError `json:"errorReason,omitempty"` + ErrorReason *capierrors.MachineSetStatusError `json:"errorReason,omitempty"` // +optional ErrorMessage *string `json:"errorMessage,omitempty"` } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineset_types_test.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types_test.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/machineset_types_test.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/register.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/register.go similarity index 91% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/register.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/register.go index 2456af99e9..bcbbb139bc 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/register.go @@ -22,11 +22,12 @@ limitations under the License. // +k8s:conversion-gen=sigs.k8s.io/cluster-api/pkg/apis/cluster // +k8s:defaulter-gen=TypeMeta // +groupName=cluster.k8s.io +// +groupGoName=ClusterDeprecated package v1alpha1 import ( "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" + "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( @@ -37,8 +38,6 @@ var ( SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} // AddToScheme adds registered types to the builder. - // Required by pkg/client/... - // TODO(pwittrock): Remove this after removing pkg/client/... AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil/BUILD.bazel similarity index 53% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil/BUILD.bazel rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil/BUILD.bazel index bcee50bdc5..70a9935167 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil/BUILD.bazel @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["testutil.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil", + importpath = "sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil", visibility = ["//visibility:public"], - deps = ["//pkg/apis/cluster/v1alpha1:go_default_library"], + deps = ["//pkg/apis/deprecated/v1alpha1:go_default_library"], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil/testutil.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil/testutil.go similarity index 94% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil/testutil.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil/testutil.go index 2af50c96d8..ce9d5de6eb 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil/testutil.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/testutil/testutil.go @@ -16,7 +16,7 @@ limitations under the License. 
package testutil -import "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" +import "sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1" // GetVanillaCluster return a bare minimum functional cluster resource object func GetVanillaCluster() v1alpha1.Cluster { diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/v1alpha1_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/v1alpha1_suite_test.go similarity index 100% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/v1alpha1_suite_test.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/v1alpha1_suite_test.go diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/zz_generated.deepcopy.go similarity index 94% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go rename to vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/zz_generated.deepcopy.go index 0f670f0738..468f2a2c7e 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1/zz_generated.deepcopy.go @@ -16,21 +16,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by main. DO NOT EDIT. +// autogenerated by controller-gen object, do not modify manually package v1alpha1 import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - common "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/cluster-api/pkg/errors" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint. @@ -50,7 +50,6 @@ func (in *Cluster) DeepCopyInto(out *Cluster) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. @@ -83,7 +82,6 @@ func (in *ClusterList) DeepCopyInto(out *ClusterList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. @@ -109,7 +107,6 @@ func (in *ClusterNetworkingConfig) DeepCopyInto(out *ClusterNetworkingConfig) { *out = *in in.Services.DeepCopyInto(&out.Services) in.Pods.DeepCopyInto(&out.Pods) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkingConfig. @@ -127,7 +124,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = *in in.ClusterNetwork.DeepCopyInto(&out.ClusterNetwork) in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. @@ -153,7 +149,6 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. 
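The deepcopy hunks in this file mostly drop the trailing return statements and swap the common.* status-error types for sigs.k8s.io/cluster-api/pkg/errors. As a rough usage sketch (the helper below is illustrative, not part of the patch), the generated DeepCopy is what lets callers mutate status fields such as ErrorReason on a private copy rather than on a shared cache object:

    package example

    import (
    	clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1"
    	capierrors "sigs.k8s.io/cluster-api/pkg/errors"
    )

    // withErrorReason returns an independent copy of the Machine with
    // Status.ErrorReason set, using the generated DeepCopy from this file.
    func withErrorReason(m *clusterv1alpha1.Machine, reason capierrors.MachineStatusError) *clusterv1alpha1.Machine {
    	out := m.DeepCopy()
    	out.Status.ErrorReason = &reason
    	return out
    }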
@@ -188,7 +183,6 @@ func (in *LastOperation) DeepCopyInto(out *LastOperation) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. @@ -208,7 +202,6 @@ func (in *Machine) DeepCopyInto(out *Machine) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. @@ -235,7 +228,6 @@ func (in *MachineClass) DeepCopyInto(out *MachineClass) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClass. @@ -268,7 +260,6 @@ func (in *MachineClassList) DeepCopyInto(out *MachineClassList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassList. @@ -297,7 +288,6 @@ func (in *MachineClassRef) DeepCopyInto(out *MachineClassRef) { *out = new(v1.ObjectReference) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineClassRef. @@ -317,7 +307,6 @@ func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeployment. @@ -350,7 +339,6 @@ func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentList. @@ -401,7 +389,6 @@ func (in *MachineDeploymentSpec) DeepCopyInto(out *MachineDeploymentSpec) { *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentSpec. @@ -417,7 +404,6 @@ func (in *MachineDeploymentSpec) DeepCopy() *MachineDeploymentSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineDeploymentStatus) DeepCopyInto(out *MachineDeploymentStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStatus. @@ -438,7 +424,6 @@ func (in *MachineDeploymentStrategy) DeepCopyInto(out *MachineDeploymentStrategy *out = new(MachineRollingUpdateDeployment) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentStrategy. @@ -463,7 +448,6 @@ func (in *MachineList) DeepCopyInto(out *MachineList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. @@ -497,7 +481,6 @@ func (in *MachineRollingUpdateDeployment) DeepCopyInto(out *MachineRollingUpdate *out = new(intstr.IntOrString) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineRollingUpdateDeployment. 
@@ -517,7 +500,6 @@ func (in *MachineSet) DeepCopyInto(out *MachineSet) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. @@ -550,7 +532,6 @@ func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. @@ -581,7 +562,6 @@ func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { } in.Selector.DeepCopyInto(&out.Selector) in.Template.DeepCopyInto(&out.Template) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. @@ -599,7 +579,7 @@ func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { *out = *in if in.ErrorReason != nil { in, out := &in.ErrorReason, &out.ErrorReason - *out = new(common.MachineSetStatusError) + *out = new(errors.MachineSetStatusError) **out = **in } if in.ErrorMessage != nil { @@ -607,7 +587,6 @@ func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. @@ -643,7 +622,6 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. @@ -675,7 +653,7 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { } if in.ErrorReason != nil { in, out := &in.ErrorReason, &out.ErrorReason - *out = new(common.MachineStatusError) + *out = new(errors.MachineStatusError) **out = **in } if in.ErrorMessage != nil { @@ -710,7 +688,6 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. @@ -728,7 +705,6 @@ func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { *out = *in in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. @@ -744,7 +720,6 @@ func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineVersionInfo) DeepCopyInto(out *MachineVersionInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineVersionInfo. @@ -765,7 +740,6 @@ func (in *NetworkRanges) DeepCopyInto(out *NetworkRanges) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkRanges. @@ -778,6 +752,42 @@ func (in *NetworkRanges) DeepCopy() *NetworkRanges { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]metav1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { *out = *in @@ -791,7 +801,6 @@ func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { *out = new(ProviderSpecSource) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. @@ -812,7 +821,6 @@ func (in *ProviderSpecSource) DeepCopyInto(out *ProviderSpecSource) { *out = new(MachineClassRef) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpecSource. diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel deleted file mode 100644 index 9665ba8d3d..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel deleted file mode 100644 index e3d124b8b1..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset_generated.go", - "doc.go", - "register.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/discovery/fake:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go deleted file mode 100644 index 4c8feaedae..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/clientset_generated.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - fakeclusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. 
-type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -var _ clientset.Interface = &Clientset{} - -// ClusterV1alpha1 retrieves the ClusterV1alpha1Client -func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface { - return &fakeclusterv1alpha1.FakeClusterV1alpha1{Fake: &c.Fake} -} - -// Cluster retrieves the ClusterV1alpha1Client -func (c *Clientset) Cluster() clusterv1alpha1.ClusterV1alpha1Interface { - return &fakeclusterv1alpha1.FakeClusterV1alpha1{Fake: &c.Fake} -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go deleted file mode 100644 index 76b5cf7a8f..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - clusterv1alpha1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
-var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel deleted file mode 100644 index 967e9635f2..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index d7034eae03..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "cluster_client.go", - "doc.go", - "generated_expansion.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/scheme:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go deleted file mode 100644 index b5f95edc18..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters(namespace string) ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. -type ClusterInterface interface { - Create(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) - Update(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) - UpdateStatus(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.Cluster, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client rest.Interface - ns string -} - -// newClusters returns a Clusters -func newClusters(c *ClusterV1alpha1Client, namespace string) *clusters { - return &clusters{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *clusters) List(opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Post(). - Namespace(c.ns). - Resource("clusters"). - Body(cluster). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - Body(cluster). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *clusters) UpdateStatus(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Put(). - Namespace(c.ns). - Resource("clusters"). - Name(cluster.Name). - SubResource("status"). - Body(cluster). - Do(). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *clusters) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("clusters"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cluster. -func (c *clusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) { - result = &v1alpha1.Cluster{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("clusters"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go deleted file mode 100644 index fe439fda99..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" -) - -type ClusterV1alpha1Interface interface { - RESTClient() rest.Interface - ClustersGetter - MachinesGetter - MachineClassesGetter - MachineDeploymentsGetter - MachineSetsGetter -} - -// ClusterV1alpha1Client is used to interact with features provided by the cluster.k8s.io group. -type ClusterV1alpha1Client struct { - restClient rest.Interface -} - -func (c *ClusterV1alpha1Client) Clusters(namespace string) ClusterInterface { - return newClusters(c, namespace) -} - -func (c *ClusterV1alpha1Client) Machines(namespace string) MachineInterface { - return newMachines(c, namespace) -} - -func (c *ClusterV1alpha1Client) MachineClasses(namespace string) MachineClassInterface { - return newMachineClasses(c, namespace) -} - -func (c *ClusterV1alpha1Client) MachineDeployments(namespace string) MachineDeploymentInterface { - return newMachineDeployments(c, namespace) -} - -func (c *ClusterV1alpha1Client) MachineSets(namespace string) MachineSetInterface { - return newMachineSets(c, namespace) -} - -// NewForConfig creates a new ClusterV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*ClusterV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ClusterV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new ClusterV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ClusterV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ClusterV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *ClusterV1alpha1Client { - return &ClusterV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *ClusterV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel deleted file mode 100644 index 76a6933b28..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake_cluster.go", - "fake_cluster_client.go", - "fake_machine.go", - "fake_machineclass.go", - "fake_machinedeployment.go", - "fake_machineset.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go deleted file mode 100644 index 1d9430a0c0..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// FakeClusters implements ClusterInterface -type FakeClusters struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var clustersResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "clusters"} - -var clustersKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "Cluster"} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. 
-func (c *FakeClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(clustersResource, c.ns, name), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *FakeClusters) List(opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(clustersResource, clustersKind, c.ns, opts), &v1alpha1.ClusterList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterList{ListMeta: obj.(*v1alpha1.ClusterList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *FakeClusters) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(clustersResource, c.ns, opts)) - -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *FakeClusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(clustersResource, c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeClusters) UpdateStatus(cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(clustersResource, "status", c.ns, cluster), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. -func (c *FakeClusters) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(clustersResource, c.ns, name), &v1alpha1.Cluster{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(clustersResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterList{}) - return err -} - -// Patch applies the patch and returns the patched cluster. -func (c *FakeClusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(clustersResource, c.ns, name, pt, data, subresources...), &v1alpha1.Cluster{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Cluster), err -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go deleted file mode 100644 index bcef2b1542..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_cluster_client.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" -) - -type FakeClusterV1alpha1 struct { - *testing.Fake -} - -func (c *FakeClusterV1alpha1) Clusters(namespace string) v1alpha1.ClusterInterface { - return &FakeClusters{c, namespace} -} - -func (c *FakeClusterV1alpha1) Machines(namespace string) v1alpha1.MachineInterface { - return &FakeMachines{c, namespace} -} - -func (c *FakeClusterV1alpha1) MachineClasses(namespace string) v1alpha1.MachineClassInterface { - return &FakeMachineClasses{c, namespace} -} - -func (c *FakeClusterV1alpha1) MachineDeployments(namespace string) v1alpha1.MachineDeploymentInterface { - return &FakeMachineDeployments{c, namespace} -} - -func (c *FakeClusterV1alpha1) MachineSets(namespace string) v1alpha1.MachineSetInterface { - return &FakeMachineSets{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeClusterV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go deleted file mode 100644 index a4629ed60f..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machine.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. 
DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// FakeMachines implements MachineInterface -type FakeMachines struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machinesResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machines"} - -var machinesKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "Machine"} - -// Get takes name of the machine, and returns the corresponding machine object, and an error if there is any. -func (c *FakeMachines) Get(name string, options v1.GetOptions) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinesResource, c.ns, name), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// List takes label and field selectors, and returns the list of Machines that match those selectors. -func (c *FakeMachines) List(opts v1.ListOptions) (result *v1alpha1.MachineList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinesResource, machinesKind, c.ns, opts), &v1alpha1.MachineList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineList{ListMeta: obj.(*v1alpha1.MachineList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machines. -func (c *FakeMachines) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinesResource, c.ns, opts)) - -} - -// Create takes the representation of a machine and creates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Create(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinesResource, c.ns, machine), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// Update takes the representation of a machine and updates it. Returns the server's representation of the machine, and an error, if there is any. -func (c *FakeMachines) Update(machine *v1alpha1.Machine) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinesResource, c.ns, machine), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachines) UpdateStatus(machine *v1alpha1.Machine) (*v1alpha1.Machine, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinesResource, "status", c.ns, machine), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} - -// Delete takes name of the machine and deletes it. Returns an error if one occurs. 
-func (c *FakeMachines) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinesResource, c.ns, name), &v1alpha1.Machine{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachines) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineList{}) - return err -} - -// Patch applies the patch and returns the patched machine. -func (c *FakeMachines) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Machine, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machinesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Machine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Machine), err -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go deleted file mode 100644 index c6bbc3c5e4..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineclass.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// FakeMachineClasses implements MachineClassInterface -type FakeMachineClasses struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machineclassesResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machineclasses"} - -var machineclassesKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "MachineClass"} - -// Get takes name of the machineClass, and returns the corresponding machineClass object, and an error if there is any. -func (c *FakeMachineClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machineclassesResource, c.ns, name), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} - -// List takes label and field selectors, and returns the list of MachineClasses that match those selectors. -func (c *FakeMachineClasses) List(opts v1.ListOptions) (result *v1alpha1.MachineClassList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(machineclassesResource, machineclassesKind, c.ns, opts), &v1alpha1.MachineClassList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineClassList{ListMeta: obj.(*v1alpha1.MachineClassList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineClassList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineClasses. -func (c *FakeMachineClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machineclassesResource, c.ns, opts)) - -} - -// Create takes the representation of a machineClass and creates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *FakeMachineClasses) Create(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machineclassesResource, c.ns, machineClass), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} - -// Update takes the representation of a machineClass and updates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *FakeMachineClasses) Update(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machineclassesResource, c.ns, machineClass), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} - -// Delete takes name of the machineClass and deletes it. Returns an error if one occurs. -func (c *FakeMachineClasses) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machineclassesResource, c.ns, name), &v1alpha1.MachineClass{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machineclassesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineClassList{}) - return err -} - -// Patch applies the patch and returns the patched machineClass. -func (c *FakeMachineClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineClass, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machineclassesResource, c.ns, name, pt, data, subresources...), &v1alpha1.MachineClass{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineClass), err -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go deleted file mode 100644 index 6d0d271029..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machinedeployment.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// FakeMachineDeployments implements MachineDeploymentInterface -type FakeMachineDeployments struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machinedeploymentsResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinedeployments"} - -var machinedeploymentsKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "MachineDeployment"} - -// Get takes name of the machineDeployment, and returns the corresponding machineDeployment object, and an error if there is any. -func (c *FakeMachineDeployments) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinedeploymentsResource, c.ns, name), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// List takes label and field selectors, and returns the list of MachineDeployments that match those selectors. -func (c *FakeMachineDeployments) List(opts v1.ListOptions) (result *v1alpha1.MachineDeploymentList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinedeploymentsResource, machinedeploymentsKind, c.ns, opts), &v1alpha1.MachineDeploymentList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineDeploymentList{ListMeta: obj.(*v1alpha1.MachineDeploymentList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineDeploymentList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineDeployments. -func (c *FakeMachineDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinedeploymentsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineDeployment and creates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *FakeMachineDeployments) Create(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinedeploymentsResource, c.ns, machineDeployment), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// Update takes the representation of a machineDeployment and updates it. 
Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *FakeMachineDeployments) Update(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinedeploymentsResource, c.ns, machineDeployment), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineDeployments) UpdateStatus(machineDeployment *v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(machinedeploymentsResource, "status", c.ns, machineDeployment), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} - -// Delete takes name of the machineDeployment and deletes it. Returns an error if one occurs. -func (c *FakeMachineDeployments) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinedeploymentsResource, c.ns, name), &v1alpha1.MachineDeployment{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinedeploymentsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineDeploymentList{}) - return err -} - -// Patch applies the patch and returns the patched machineDeployment. -func (c *FakeMachineDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineDeployment, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machinedeploymentsResource, c.ns, name, pt, data, subresources...), &v1alpha1.MachineDeployment{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineDeployment), err -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go deleted file mode 100644 index 29b7dc9e63..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/fake/fake_machineset.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// FakeMachineSets implements MachineSetInterface -type FakeMachineSets struct { - Fake *FakeClusterV1alpha1 - ns string -} - -var machinesetsResource = schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinesets"} - -var machinesetsKind = schema.GroupVersionKind{Group: "cluster.k8s.io", Version: "v1alpha1", Kind: "MachineSet"} - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *FakeMachineSets) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(machinesetsResource, c.ns, name), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *FakeMachineSets) List(opts v1.ListOptions) (result *v1alpha1.MachineSetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(machinesetsResource, machinesetsKind, c.ns, opts), &v1alpha1.MachineSetList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.MachineSetList{ListMeta: obj.(*v1alpha1.MachineSetList).ListMeta} - for _, item := range obj.(*v1alpha1.MachineSetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *FakeMachineSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(machinesetsResource, c.ns, opts)) - -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Create(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(machinesetsResource, c.ns, machineSet), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *FakeMachineSets) Update(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(machinesetsResource, c.ns, machineSet), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeMachineSets) UpdateStatus(machineSet *v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateSubresourceAction(machinesetsResource, "status", c.ns, machineSet), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} - -// Delete takes name of the machineSet and deletes it. Returns an error if one occurs. -func (c *FakeMachineSets) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(machinesetsResource, c.ns, name), &v1alpha1.MachineSet{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeMachineSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(machinesetsResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &v1alpha1.MachineSetList{}) - return err -} - -// Patch applies the patch and returns the patched machineSet. -func (c *FakeMachineSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineSet, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(machinesetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.MachineSet{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.MachineSet), err -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go deleted file mode 100644 index fe1acdd0d8..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type ClusterExpansion interface{} - -type MachineExpansion interface{} - -type MachineClassExpansion interface{} - -type MachineDeploymentExpansion interface{} - -type MachineSetExpansion interface{} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go deleted file mode 100644 index d1cca05178..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineclass.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" -) - -// MachineClassesGetter has a method to return a MachineClassInterface. -// A group's client should implement this interface. -type MachineClassesGetter interface { - MachineClasses(namespace string) MachineClassInterface -} - -// MachineClassInterface has methods to work with MachineClass resources. -type MachineClassInterface interface { - Create(*v1alpha1.MachineClass) (*v1alpha1.MachineClass, error) - Update(*v1alpha1.MachineClass) (*v1alpha1.MachineClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.MachineClass, error) - List(opts v1.ListOptions) (*v1alpha1.MachineClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineClass, err error) - MachineClassExpansion -} - -// machineClasses implements MachineClassInterface -type machineClasses struct { - client rest.Interface - ns string -} - -// newMachineClasses returns a MachineClasses -func newMachineClasses(c *ClusterV1alpha1Client, namespace string) *machineClasses { - return &machineClasses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineClass, and returns the corresponding machineClass object, and an error if there is any. -func (c *machineClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineClasses that match those selectors. -func (c *machineClasses) List(opts v1.ListOptions) (result *v1alpha1.MachineClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.MachineClassList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineClasses. -func (c *machineClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a machineClass and creates it. Returns the server's representation of the machineClass, and an error, if there is any. 
-func (c *machineClasses) Create(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machineclasses"). - Body(machineClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineClass and updates it. Returns the server's representation of the machineClass, and an error, if there is any. -func (c *machineClasses) Update(machineClass *v1alpha1.MachineClass) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machineclasses"). - Name(machineClass.Name). - Body(machineClass). - Do(). - Into(result) - return -} - -// Delete takes name of the machineClass and deletes it. Returns an error if one occurs. -func (c *machineClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machineclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("machineclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineClass. -func (c *machineClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineClass, err error) { - result = &v1alpha1.MachineClass{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machineclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go deleted file mode 100644 index 7fd953042c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" -) - -// MachineDeploymentsGetter has a method to return a MachineDeploymentInterface. -// A group's client should implement this interface. 
-type MachineDeploymentsGetter interface { - MachineDeployments(namespace string) MachineDeploymentInterface -} - -// MachineDeploymentInterface has methods to work with MachineDeployment resources. -type MachineDeploymentInterface interface { - Create(*v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) - Update(*v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) - UpdateStatus(*v1alpha1.MachineDeployment) (*v1alpha1.MachineDeployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.MachineDeployment, error) - List(opts v1.ListOptions) (*v1alpha1.MachineDeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineDeployment, err error) - MachineDeploymentExpansion -} - -// machineDeployments implements MachineDeploymentInterface -type machineDeployments struct { - client rest.Interface - ns string -} - -// newMachineDeployments returns a MachineDeployments -func newMachineDeployments(c *ClusterV1alpha1Client, namespace string) *machineDeployments { - return &machineDeployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineDeployment, and returns the corresponding machineDeployment object, and an error if there is any. -func (c *machineDeployments) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineDeployments that match those selectors. -func (c *machineDeployments) List(opts v1.ListOptions) (result *v1alpha1.MachineDeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.MachineDeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineDeployments. -func (c *machineDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a machineDeployment and creates it. Returns the server's representation of the machineDeployment, and an error, if there is any. -func (c *machineDeployments) Create(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machinedeployments"). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a machineDeployment and updates it. Returns the server's representation of the machineDeployment, and an error, if there is any. 
-func (c *machineDeployments) Update(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(machineDeployment.Name). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *machineDeployments) UpdateStatus(machineDeployment *v1alpha1.MachineDeployment) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(machineDeployment.Name). - SubResource("status"). - Body(machineDeployment). - Do(). - Into(result) - return -} - -// Delete takes name of the machineDeployment and deletes it. Returns an error if one occurs. -func (c *machineDeployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinedeployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("machinedeployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineDeployment. -func (c *machineDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineDeployment, err error) { - result = &v1alpha1.MachineDeployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machinedeployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go deleted file mode 100644 index 1b6d1f1926..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - scheme "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme" -) - -// MachineSetsGetter has a method to return a MachineSetInterface. -// A group's client should implement this interface. -type MachineSetsGetter interface { - MachineSets(namespace string) MachineSetInterface -} - -// MachineSetInterface has methods to work with MachineSet resources. -type MachineSetInterface interface { - Create(*v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) - Update(*v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) - UpdateStatus(*v1alpha1.MachineSet) (*v1alpha1.MachineSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.MachineSet, error) - List(opts v1.ListOptions) (*v1alpha1.MachineSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineSet, err error) - MachineSetExpansion -} - -// machineSets implements MachineSetInterface -type machineSets struct { - client rest.Interface - ns string -} - -// newMachineSets returns a MachineSets -func newMachineSets(c *ClusterV1alpha1Client, namespace string) *machineSets { - return &machineSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the machineSet, and returns the corresponding machineSet object, and an error if there is any. -func (c *machineSets) Get(name string, options v1.GetOptions) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MachineSets that match those selectors. -func (c *machineSets) List(opts v1.ListOptions) (result *v1alpha1.MachineSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.MachineSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested machineSets. -func (c *machineSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a machineSet and creates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *machineSets) Create(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("machinesets"). - Body(machineSet). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a machineSet and updates it. Returns the server's representation of the machineSet, and an error, if there is any. -func (c *machineSets) Update(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinesets"). - Name(machineSet.Name). - Body(machineSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *machineSets) UpdateStatus(machineSet *v1alpha1.MachineSet) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("machinesets"). - Name(machineSet.Name). - SubResource("status"). - Body(machineSet). - Do(). - Into(result) - return -} - -// Delete takes name of the machineSet and deletes it. Returns an error if one occurs. -func (c *machineSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("machinesets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *machineSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("machinesets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched machineSet. -func (c *machineSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.MachineSet, err error) { - result = &v1alpha1.MachineSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("machinesets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel deleted file mode 100644 index 039490a151..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "factory.go", - "generic.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/informers_generated/externalversions/cluster:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel deleted file mode 100644 index b8cbc69a99..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/informers_generated/externalversions/cluster/v1alpha1:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go deleted file mode 100644 index fe64976bcb..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package cluster - -import ( - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" -) - -// Interface provides access to each of this group's versions. 
-type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index ffabb236fe..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "interface.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/informers_generated/externalversions/internalinterfaces:go_default_library", - "//pkg/client/listers_generated/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go deleted file mode 100644 index 69378ad45c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/cluster.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" -) - -// ClusterInformer provides access to a shared informer and lister for -// Clusters. -type ClusterInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterLister -} - -type clusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterInformer constructs a new informer for Cluster type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Clusters(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Clusters(namespace).Watch(options) - }, - }, - &clusterv1alpha1.Cluster{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.Cluster{}, f.defaultInformer) -} - -func (f *clusterInformer) Lister() v1alpha1.ClusterLister { - return v1alpha1.NewClusterLister(f.Informer().GetIndexer()) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go deleted file mode 100644 index c1d72eb2d3..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/interface.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // Clusters returns a ClusterInformer. - Clusters() ClusterInformer - // Machines returns a MachineInformer. - Machines() MachineInformer - // MachineClasses returns a MachineClassInformer. - MachineClasses() MachineClassInformer - // MachineDeployments returns a MachineDeploymentInformer. - MachineDeployments() MachineDeploymentInformer - // MachineSets returns a MachineSetInformer. - MachineSets() MachineSetInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Clusters returns a ClusterInformer. -func (v *version) Clusters() ClusterInformer { - return &clusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// Machines returns a MachineInformer. -func (v *version) Machines() MachineInformer { - return &machineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineClasses returns a MachineClassInformer. -func (v *version) MachineClasses() MachineClassInformer { - return &machineClassInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineDeployments returns a MachineDeploymentInformer. -func (v *version) MachineDeployments() MachineDeploymentInformer { - return &machineDeploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// MachineSets returns a MachineSetInformer. -func (v *version) MachineSets() MachineSetInformer { - return &machineSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go deleted file mode 100644 index d3ba8d0a4b..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machine.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" -) - -// MachineInformer provides access to a shared informer and lister for -// Machines. -type MachineInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineLister -} - -type machineInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineInformer constructs a new informer for Machine type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineInformer constructs a new informer for Machine type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredMachineInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Machines(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().Machines(namespace).Watch(options) - }, - }, - &clusterv1alpha1.Machine{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.Machine{}, f.defaultInformer) -} - -func (f *machineInformer) Lister() v1alpha1.MachineLister { - return v1alpha1.NewMachineLister(f.Informer().GetIndexer()) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go deleted file mode 100644 index bcd3c4480c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineclass.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" -) - -// MachineClassInformer provides access to a shared informer and lister for -// MachineClasses. -type MachineClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineClassLister -} - -type machineClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineClassInformer constructs a new informer for MachineClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. 
This reduces memory footprint and number of connections to the server. -func NewMachineClassInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineClassInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineClassInformer constructs a new informer for MachineClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredMachineClassInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineClasses(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineClasses(namespace).Watch(options) - }, - }, - &clusterv1alpha1.MachineClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineClassInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineClassInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.MachineClass{}, f.defaultInformer) -} - -func (f *machineClassInformer) Lister() v1alpha1.MachineClassLister { - return v1alpha1.NewMachineClassLister(f.Informer().GetIndexer()) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go deleted file mode 100644 index c32da260e3..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machinedeployment.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" -) - -// MachineDeploymentInformer provides access to a shared informer and lister for -// MachineDeployments. -type MachineDeploymentInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineDeploymentLister -} - -type machineDeploymentInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineDeploymentInformer constructs a new informer for MachineDeployment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineDeploymentInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineDeploymentInformer constructs a new informer for MachineDeployment type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredMachineDeploymentInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineDeployments(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineDeployments(namespace).Watch(options) - }, - }, - &clusterv1alpha1.MachineDeployment{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineDeploymentInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineDeploymentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.MachineDeployment{}, f.defaultInformer) -} - -func (f *machineDeploymentInformer) Lister() v1alpha1.MachineDeploymentLister { - return v1alpha1.NewMachineDeploymentLister(f.Informer().GetIndexer()) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go deleted file mode 100644 index 31b05122bc..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster/v1alpha1/machineset.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" -) - -// MachineSetInformer provides access to a shared informer and lister for -// MachineSets. -type MachineSetInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.MachineSetLister -} - -type machineSetInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewMachineSetInformer constructs a new informer for MachineSet type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewMachineSetInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredMachineSetInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredMachineSetInformer constructs a new informer for MachineSet type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredMachineSetInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineSets(namespace).List(options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ClusterV1alpha1().MachineSets(namespace).Watch(options) - }, - }, - &clusterv1alpha1.MachineSet{}, - resyncPeriod, - indexers, - ) -} - -func (f *machineSetInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredMachineSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *machineSetInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&clusterv1alpha1.MachineSet{}, f.defaultInformer) -} - -func (f *machineSetInformer) Lister() v1alpha1.MachineSetLister { - return v1alpha1.NewMachineSetLister(f.Informer().GetIndexer()) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/factory.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/factory.go deleted file mode 100644 index fa18f49b89..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/factory.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - cluster "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/cluster" - internalinterfaces "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client clientset.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client clientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
-func NewSharedInformerFactoryWithOptions(client clientset.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. -func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. -func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. -type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Cluster() cluster.Interface -} - -func (f *sharedInformerFactory) Cluster() cluster.Interface { - return cluster.New(f, f.namespace, f.tweakListOptions) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/generic.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/generic.go deleted file mode 100644 index ce44284704..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/generic.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - "fmt" - - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. -func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=cluster.k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("clusters"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Clusters().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machines"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Machines().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machineclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().MachineClasses().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machinedeployments"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().MachineDeployments().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("machinesets"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().MachineSets().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel deleted file mode 100644 index d6a8332c99..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["factory_interfaces.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces", - visibility = ["//visibility:public"], - deps = [ - "//pkg/client/clientset_generated/clientset:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go 
b/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index 92c2887f69..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/informers_generated/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" -) - -// NewInformerFunc takes clientset.Interface and time.Duration to return a SharedIndexInformer. -type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index 40ed528680..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "expansion_generated.go", - "machine.go", - "machineclass.go", - "machinedeployment.go", - "machineset.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go deleted file mode 100644 index 1ed56f8f7c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/cluster.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// ClusterLister helps list Clusters. -type ClusterLister interface { - // List lists all Clusters in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) - // Clusters returns an object that can list and get Clusters. - Clusters(namespace string) ClusterNamespaceLister - ClusterListerExpansion -} - -// clusterLister implements the ClusterLister interface. -type clusterLister struct { - indexer cache.Indexer -} - -// NewClusterLister returns a new ClusterLister. -func NewClusterLister(indexer cache.Indexer) ClusterLister { - return &clusterLister{indexer: indexer} -} - -// List lists all Clusters in the indexer. -func (s *clusterLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Cluster)) - }) - return ret, err -} - -// Clusters returns an object that can list and get Clusters. -func (s *clusterLister) Clusters(namespace string) ClusterNamespaceLister { - return clusterNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ClusterNamespaceLister helps list and get Clusters. -type ClusterNamespaceLister interface { - // List lists all Clusters in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) - // Get retrieves the Cluster from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.Cluster, error) - ClusterNamespaceListerExpansion -} - -// clusterNamespaceLister implements the ClusterNamespaceLister -// interface. -type clusterNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Clusters in the indexer for a given namespace. -func (s clusterNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Cluster)) - }) - return ret, err -} - -// Get retrieves the Cluster from the indexer for a given namespace and name. -func (s clusterNamespaceLister) Get(name string) (*v1alpha1.Cluster, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("cluster"), name) - } - return obj.(*v1alpha1.Cluster), nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go deleted file mode 100644 index 37f03c9c2d..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// ClusterListerExpansion allows custom methods to be added to -// ClusterLister. -type ClusterListerExpansion interface{} - -// ClusterNamespaceListerExpansion allows custom methods to be added to -// ClusterNamespaceLister. -type ClusterNamespaceListerExpansion interface{} - -// MachineListerExpansion allows custom methods to be added to -// MachineLister. -type MachineListerExpansion interface{} - -// MachineNamespaceListerExpansion allows custom methods to be added to -// MachineNamespaceLister. -type MachineNamespaceListerExpansion interface{} - -// MachineClassListerExpansion allows custom methods to be added to -// MachineClassLister. -type MachineClassListerExpansion interface{} - -// MachineClassNamespaceListerExpansion allows custom methods to be added to -// MachineClassNamespaceLister. -type MachineClassNamespaceListerExpansion interface{} - -// MachineDeploymentListerExpansion allows custom methods to be added to -// MachineDeploymentLister. -type MachineDeploymentListerExpansion interface{} - -// MachineDeploymentNamespaceListerExpansion allows custom methods to be added to -// MachineDeploymentNamespaceLister. -type MachineDeploymentNamespaceListerExpansion interface{} - -// MachineSetListerExpansion allows custom methods to be added to -// MachineSetLister. -type MachineSetListerExpansion interface{} - -// MachineSetNamespaceListerExpansion allows custom methods to be added to -// MachineSetNamespaceLister. -type MachineSetNamespaceListerExpansion interface{} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go deleted file mode 100644 index 4a947d1b04..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machine.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// MachineLister helps list Machines. -type MachineLister interface { - // List lists all Machines in the indexer. 
- List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) - // Machines returns an object that can list and get Machines. - Machines(namespace string) MachineNamespaceLister - MachineListerExpansion -} - -// machineLister implements the MachineLister interface. -type machineLister struct { - indexer cache.Indexer -} - -// NewMachineLister returns a new MachineLister. -func NewMachineLister(indexer cache.Indexer) MachineLister { - return &machineLister{indexer: indexer} -} - -// List lists all Machines in the indexer. -func (s *machineLister) List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Machine)) - }) - return ret, err -} - -// Machines returns an object that can list and get Machines. -func (s *machineLister) Machines(namespace string) MachineNamespaceLister { - return machineNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineNamespaceLister helps list and get Machines. -type MachineNamespaceLister interface { - // List lists all Machines in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) - // Get retrieves the Machine from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.Machine, error) - MachineNamespaceListerExpansion -} - -// machineNamespaceLister implements the MachineNamespaceLister -// interface. -type machineNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Machines in the indexer for a given namespace. -func (s machineNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Machine, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Machine)) - }) - return ret, err -} - -// Get retrieves the Machine from the indexer for a given namespace and name. -func (s machineNamespaceLister) Get(name string) (*v1alpha1.Machine, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machine"), name) - } - return obj.(*v1alpha1.Machine), nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go deleted file mode 100644 index 5e7bd68e52..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineclass.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// MachineClassLister helps list MachineClasses. 
-type MachineClassLister interface { - // List lists all MachineClasses in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) - // MachineClasses returns an object that can list and get MachineClasses. - MachineClasses(namespace string) MachineClassNamespaceLister - MachineClassListerExpansion -} - -// machineClassLister implements the MachineClassLister interface. -type machineClassLister struct { - indexer cache.Indexer -} - -// NewMachineClassLister returns a new MachineClassLister. -func NewMachineClassLister(indexer cache.Indexer) MachineClassLister { - return &machineClassLister{indexer: indexer} -} - -// List lists all MachineClasses in the indexer. -func (s *machineClassLister) List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineClass)) - }) - return ret, err -} - -// MachineClasses returns an object that can list and get MachineClasses. -func (s *machineClassLister) MachineClasses(namespace string) MachineClassNamespaceLister { - return machineClassNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineClassNamespaceLister helps list and get MachineClasses. -type MachineClassNamespaceLister interface { - // List lists all MachineClasses in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) - // Get retrieves the MachineClass from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.MachineClass, error) - MachineClassNamespaceListerExpansion -} - -// machineClassNamespaceLister implements the MachineClassNamespaceLister -// interface. -type machineClassNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineClasses in the indexer for a given namespace. -func (s machineClassNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MachineClass, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineClass)) - }) - return ret, err -} - -// Get retrieves the MachineClass from the indexer for a given namespace and name. -func (s machineClassNamespaceLister) Get(name string) (*v1alpha1.MachineClass, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machineclass"), name) - } - return obj.(*v1alpha1.MachineClass), nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go deleted file mode 100644 index a51fb3b60b..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machinedeployment.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// MachineDeploymentLister helps list MachineDeployments. -type MachineDeploymentLister interface { - // List lists all MachineDeployments in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) - // MachineDeployments returns an object that can list and get MachineDeployments. - MachineDeployments(namespace string) MachineDeploymentNamespaceLister - MachineDeploymentListerExpansion -} - -// machineDeploymentLister implements the MachineDeploymentLister interface. -type machineDeploymentLister struct { - indexer cache.Indexer -} - -// NewMachineDeploymentLister returns a new MachineDeploymentLister. -func NewMachineDeploymentLister(indexer cache.Indexer) MachineDeploymentLister { - return &machineDeploymentLister{indexer: indexer} -} - -// List lists all MachineDeployments in the indexer. -func (s *machineDeploymentLister) List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineDeployment)) - }) - return ret, err -} - -// MachineDeployments returns an object that can list and get MachineDeployments. -func (s *machineDeploymentLister) MachineDeployments(namespace string) MachineDeploymentNamespaceLister { - return machineDeploymentNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineDeploymentNamespaceLister helps list and get MachineDeployments. -type MachineDeploymentNamespaceLister interface { - // List lists all MachineDeployments in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) - // Get retrieves the MachineDeployment from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.MachineDeployment, error) - MachineDeploymentNamespaceListerExpansion -} - -// machineDeploymentNamespaceLister implements the MachineDeploymentNamespaceLister -// interface. -type machineDeploymentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineDeployments in the indexer for a given namespace. -func (s machineDeploymentNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MachineDeployment, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineDeployment)) - }) - return ret, err -} - -// Get retrieves the MachineDeployment from the indexer for a given namespace and name. 
-func (s machineDeploymentNamespaceLister) Get(name string) (*v1alpha1.MachineDeployment, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machinedeployment"), name) - } - return obj.(*v1alpha1.MachineDeployment), nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go b/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go deleted file mode 100644 index 783ad250d3..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1/machineset.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - v1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// MachineSetLister helps list MachineSets. -type MachineSetLister interface { - // List lists all MachineSets in the indexer. - List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) - // MachineSets returns an object that can list and get MachineSets. - MachineSets(namespace string) MachineSetNamespaceLister - MachineSetListerExpansion -} - -// machineSetLister implements the MachineSetLister interface. -type machineSetLister struct { - indexer cache.Indexer -} - -// NewMachineSetLister returns a new MachineSetLister. -func NewMachineSetLister(indexer cache.Indexer) MachineSetLister { - return &machineSetLister{indexer: indexer} -} - -// List lists all MachineSets in the indexer. -func (s *machineSetLister) List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineSet)) - }) - return ret, err -} - -// MachineSets returns an object that can list and get MachineSets. -func (s *machineSetLister) MachineSets(namespace string) MachineSetNamespaceLister { - return machineSetNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// MachineSetNamespaceLister helps list and get MachineSets. -type MachineSetNamespaceLister interface { - // List lists all MachineSets in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) - // Get retrieves the MachineSet from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.MachineSet, error) - MachineSetNamespaceListerExpansion -} - -// machineSetNamespaceLister implements the MachineSetNamespaceLister -// interface. -type machineSetNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all MachineSets in the indexer for a given namespace. 
-func (s machineSetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.MachineSet, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.MachineSet)) - }) - return ret, err -} - -// Get retrieves the MachineSet from the indexer for a given namespace and name. -func (s machineSetNamespaceLister) Get(name string) (*v1alpha1.MachineSet, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("machineset"), name) - } - return obj.(*v1alpha1.MachineSet), nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/BUILD.bazel deleted file mode 100644 index be0dd64e36..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["cmd_runner.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/cmdrunner", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["cmd_runner_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner_test.go deleted file mode 100644 index 9679841d50..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cmdrunner - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestInvalidCommand(t *testing.T) { - runner := New() - output, err := runner.CombinedOutput("asdf") - if err == nil { - t.Errorf("invalid error: expected 'nil', got '%v'", err) - } - if output != "" { - t.Errorf("unexpected output: expected empty string '', got '%v'", output) - } -} - -func TestValidCommandErrors(t *testing.T) { - skipIfCommandNotPresent(t, "ls") - runner := New() - _, err := runner.CombinedOutput("ls /invalid/path") - if err == nil { - t.Errorf("invalid error: expected 'nil', got '%v'", err) - } -} - -func TestValidCommandSucceeds(t *testing.T) { - skipIfCommandNotPresent(t, "ls") - dir, err := ioutil.TempDir("", "") - if err != nil { - t.Errorf("unable to create temp dir: %v", err) - t.FailNow() - } - defer os.RemoveAll(dir) - runner := New() - output, err := runner.CombinedOutput("ls", "-al", dir) - if err != nil { - t.Errorf("invalid error: expected 'nil', got '%v'", err) - } - if output == "" { - t.Errorf("expected valid output got empty string") - } -} - -func TestCombinedOutputShouldIncludeStdOutAndErr(t *testing.T) { - skipIfCommandNotPresent(t, "echo") - skipIfCommandNotPresent(t, "sh") - runner := New() - output, err := runner.CombinedOutput("sh", "-c", "echo \"stdout\" && (>&2 echo \"stderr\")") - if err != nil { - t.Errorf("invalid error: expected 'nil', got '%v'", err) - } - expectedOutput := "stdout\nstderr\n" - if output != expectedOutput { - t.Errorf("invalid output: expected '%v', got '%v'", expectedOutput, output) - } -} - -func skipIfCommandNotPresent(t *testing.T, cmd string) { - runner := New() - _, err := runner.CombinedOutput("ls") - if err != nil { - t.Skipf("unable to run test, 'ls' reults in error: %v", err) - } -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/BUILD.bazel index 0808edfe87..bc3ec5d287 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/BUILD.bazel @@ -3,19 +3,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ + "add_cluster.go", + "add_machine.go", "add_machinedeployment.go", "add_machineset.go", - "add_node.go", - "add_noderef.go", "controller.go", ], importpath = "sigs.k8s.io/cluster-api/pkg/controller", visibility = ["//visibility:public"], deps = [ + "//pkg/controller/cluster:go_default_library", + "//pkg/controller/machine:go_default_library", "//pkg/controller/machinedeployment:go_default_library", "//pkg/controller/machineset:go_default_library", - "//pkg/controller/node:go_default_library", - "//pkg/controller/noderef:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_noderef.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_cluster.go similarity index 86% rename from vendor/sigs.k8s.io/cluster-api/pkg/controller/add_noderef.go rename to vendor/sigs.k8s.io/cluster-api/pkg/controller/add_cluster.go index 4a5d4cad7c..85d58531ea 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_noderef.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_cluster.go @@ -17,10 +17,10 @@ limitations under the License. 
package controller import ( - "sigs.k8s.io/cluster-api/pkg/controller/noderef" + "sigs.k8s.io/cluster-api/pkg/controller/cluster" ) func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. - AddToManagerFuncs = append(AddToManagerFuncs, noderef.Add) + AddToManagerFuncs = append(AddToManagerFuncs, cluster.Add) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_node.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_machine.go similarity index 81% rename from vendor/sigs.k8s.io/cluster-api/pkg/controller/add_node.go rename to vendor/sigs.k8s.io/cluster-api/pkg/controller/add_machine.go index f6783fb534..e6cc2ca53b 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_node.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/add_machine.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,10 +17,10 @@ limitations under the License. package controller import ( - "sigs.k8s.io/cluster-api/pkg/controller/node" + "sigs.k8s.io/cluster-api/pkg/controller/machine" ) func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. - AddToManagerFuncs = append(AddToManagerFuncs, node.Add) + AddToManagerFuncs = append(AddToManagerFuncs, machine.Add) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/BUILD.bazel index 06224f9c9e..f153e1d305 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/BUILD.bazel @@ -3,27 +3,27 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "actuator.go", "cluster_controller.go", - "testactuator.go", + "cluster_controller_phases.go", ], importpath = "sigs.k8s.io/cluster-api/pkg/controller/cluster", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//pkg/controller/error:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/controller/external:go_default_library", + "//pkg/errors:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/client-go/tools/pager:go_default_library", - "//vendor/k8s.io/client-go/util/retry:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/controller:go_default_library", 
"//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", @@ -41,16 +41,14 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/actuator.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/actuator.go deleted file mode 100644 index d41a6cbb4b..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/actuator.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -/// [Actuator] -// Actuator controls clusters on a specific infrastructure. All -// methods should be idempotent unless otherwise specified. -type Actuator interface { - // Reconcile creates or applies updates to the cluster. - Reconcile(*clusterv1.Cluster) error - // Delete the cluster. 
- Delete(*clusterv1.Cluster) error -} - -/// [Actuator] diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go index d101166903..7af536b1e2 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller.go @@ -18,22 +18,21 @@ package cluster import ( "context" + "path" + "sync" "time" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - errorag "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/client-go/tools/pager" - "k8s.io/client-go/util/retry" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" "k8s.io/klog" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" - controllerError "sigs.k8s.io/cluster-api/pkg/controller/error" + "sigs.k8s.io/cluster-api/api/v1alpha2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -43,47 +42,47 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -const deleteRequeueAfter = 5 * time.Second +const ( + controllerName = "cluster-controller" -var DefaultActuator Actuator - -func AddWithActuator(mgr manager.Manager, actuator Actuator) error { - reconciler, err := newReconciler(mgr, actuator) - if err != nil { - return err - } + // deleteRequeueAfter is how long to wait before checking again to see if the cluster still has children during + // deletion. + deleteRequeueAfter = 5 * time.Second +) - return add(mgr, reconciler) +// Add creates a new Cluster Controller and adds it to the Manager with default RBAC. +// The Manager will set fields on the Controller and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + r := newReconciler(mgr) + c, err := addController(mgr, r) + r.controller = c + return err } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, actuator Actuator) (reconcile.Reconciler, error) { - cclient, err := v1alpha1.NewForConfig(mgr.GetConfig()) - if err != nil { - return nil, errors.WithStack(err) - } +func newReconciler(mgr manager.Manager) *ReconcileCluster { return &ReconcileCluster{ - Client: mgr.GetClient(), - clusterClient: cclient, - scheme: mgr.GetScheme(), - actuator: actuator}, nil + Client: mgr.GetClient(), + scheme: mgr.GetScheme(), + recorder: mgr.GetEventRecorderFor(controllerName), + } } // add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { +func addController(mgr manager.Manager, r reconcile.Reconciler) (controller.Controller, error) { // Create a new controller - c, err := controller.New("cluster-controller", mgr, controller.Options{Reconciler: r}) + c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) if err != nil { - return err + return nil, err } // Watch for changes to Cluster - err = c.Watch(&source.Kind{Type: &clusterv1alpha1.Cluster{}}, &handler.EnqueueRequestForObject{}) + err = c.Watch(&source.Kind{Type: &clusterv1.Cluster{}}, &handler.EnqueueRequestForObject{}) if err != nil { - return err + return nil, err } - return nil + return c, nil } var _ reconcile.Reconciler = &ReconcileCluster{} @@ -91,173 +90,167 @@ var _ reconcile.Reconciler = &ReconcileCluster{} // ReconcileCluster reconciles a Cluster object type ReconcileCluster struct { client.Client - // TODO: remove this once the controller-runtime Client has pagination support - clusterClient v1alpha1.ClusterV1alpha1Interface - scheme *runtime.Scheme - actuator Actuator + scheme *runtime.Scheme + controller controller.Controller + recorder record.EventRecorder + externalWatchers sync.Map } -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete -func (r *ReconcileCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) { - cluster := &clusterv1alpha1.Cluster{} - err := r.Get(context.Background(), request.NamespacedName, cluster) +func (r *ReconcileCluster) Reconcile(request reconcile.Request) (_ reconcile.Result, reterr error) { + ctx := context.TODO() + + // Fetch the Cluster instance. + cluster := &clusterv1.Cluster{} + err := r.Get(ctx, request.NamespacedName, cluster) if err != nil { if apierrors.IsNotFound(err) { // Object not found, return. Created objects are automatically garbage collected. // For additional cleanup logic use finalizers. return reconcile.Result{}, nil } + // Error reading the object - requeue the request. - return reconcile.Result{}, errors.WithStack(err) + return reconcile.Result{}, err } - name := cluster.Name - klog.Infof("Running reconcile Cluster for %q", name) - - // If object hasn't been deleted and doesn't have a finalizer, add one - // Add a finalizer to newly created objects. - if cluster.ObjectMeta.DeletionTimestamp.IsZero() { - finalizerCount := len(cluster.Finalizers) + // Store Cluster early state to allow patching. 
+ patchCluster := client.MergeFrom(cluster.DeepCopy()) - if !util.Contains(cluster.Finalizers, metav1.FinalizerDeleteDependents) { - cluster.Finalizers = append(cluster.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) + // Always issue a Patch for the Cluster object and its status after each reconciliation. + defer func() { + gvk := cluster.GroupVersionKind() + if err := r.Client.Patch(ctx, cluster, patchCluster); err != nil { + klog.Errorf("Error Patching Cluster %q in namespace %q: %v", cluster.Name, cluster.Namespace, err) + if reterr == nil { + reterr = err + } + return } + // TODO(vincepri): This is a hack because after a Patch, the object loses TypeMeta information. + // Remove when https://github.com/kubernetes-sigs/controller-runtime/issues/526 is fixed. + cluster.SetGroupVersionKind(gvk) + if err := r.Client.Status().Patch(ctx, cluster, patchCluster); err != nil { + klog.Errorf("Error Patching Cluster status %q in namespace %q: %v", cluster.Name, cluster.Namespace, err) + if reterr == nil { + reterr = err + } + } + }() + // If object hasn't been deleted and doesn't have a finalizer, add one. + if cluster.ObjectMeta.DeletionTimestamp.IsZero() { if !util.Contains(cluster.Finalizers, clusterv1.ClusterFinalizer) { cluster.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) } + } - if len(cluster.Finalizers) > finalizerCount { - if err := r.Update(context.Background(), cluster); err != nil { - klog.Infof("Failed to add finalizer to cluster %q: %v", name, err) - return reconcile.Result{}, errors.WithStack(err) - } - - // Since adding the finalizer updates the object return to avoid later update issues. - return reconcile.Result{Requeue: true}, nil + if err := r.reconcile(ctx, cluster); err != nil { + if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok { + klog.Infof("Reconciliation for Cluster %q in namespace %q asked to requeue: %v", cluster.Name, cluster.Namespace, err) + return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil } - + return reconcile.Result{}, err } if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { - // no-op if finalizer has been removed. - if !util.Contains(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) { - klog.Infof("Reconciling cluster object %v causes a no-op as there is no finalizer.", name) - return reconcile.Result{}, nil - } - - children, err := r.listChildren(context.Background(), cluster) - if err != nil { - klog.Errorf("Failed to list dependent objects of cluster %s/%s: %v", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.Name, err) - return reconcile.Result{}, err - } - - if len(children) > 0 { - klog.Infof("Deleting cluster %s: %d children still exist, will requeue", name, len(children)) + return r.reconcileDelete(ctx, cluster) + } - var errList []error + return reconcile.Result{}, nil +} - for _, child := range children { - accessor, err := meta.Accessor(child) - if err != nil { - klog.Errorf("cluster %s: couldn't create accessor for %T: %v", name, child, err) - continue - } +// reconcileDelete handles cluster deletion. +// TODO(ncdc): consolidate all deletion logic in here. 
+func (r *ReconcileCluster) reconcileDelete(ctx context.Context, cluster *v1alpha2.Cluster) (reconcile.Result, error) { + children, err := r.listChildren(ctx, cluster) + if err != nil { + klog.Errorf("Failed to list children of cluster %s/%s: %v", cluster.Namespace, cluster.Name, err) + return reconcile.Result{}, err + } - if accessor.GetDeletionTimestamp() != nil { - continue - } + if len(children) > 0 { + klog.Infof("Cluster %s/%s still has %d children - deleting them first", cluster.Namespace, cluster.Name, len(children)) - gvk := child.GetObjectKind().GroupVersionKind().String() + var errs []error - klog.V(4).Infof("Deleting cluster %s: Deleting %s %s", name, gvk, accessor.GetName()) - if err := r.Delete(context.Background(), child, client.PropagationPolicy(metav1.DeletePropagationForeground)); err != nil { - err = errors.Wrapf(err, "deleting cluster %s: failed to delete %s %s", name, gvk, accessor.GetName()) - klog.Errorf("Deletion error: %v", err) - errList = append(errList, err) - } + for _, child := range children { + accessor, err := meta.Accessor(child) + if err != nil { + klog.Errorf("Cluster %s/%s: couldn't create accessor for type %T: %v", cluster.Namespace, cluster.Name, child, err) + continue } - if len(errList) > 0 { - return reconcile.Result{}, errorag.NewAggregate(errList) - + if !accessor.GetDeletionTimestamp().IsZero() { + // Don't handle deleted child + continue } - return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}, nil - } - - klog.Infof("Reconciling cluster object %v triggers delete.", name) - if err := r.actuator.Delete(cluster); err != nil { - klog.Errorf("Error deleting cluster object %v; %v", name, err) - return reconcile.Result{}, errors.WithStack(err) - } + gvk := child.GetObjectKind().GroupVersionKind().String() - // Remove finalizer on successful deletion. - klog.Infof("Cluster object %v deletion successful, removing finalizer.", name) - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - // It's possible the actuator's Delete call modified the cluster. We can't guarantee that every provider - // updated the in memory cluster object with the latest copy of the cluster, so try to get a fresh copy. - // - // Note, because the get from the client is a cached read from the shared informer's cache, there's still a - // chance this could be a stale read. - // - // Note 2, this is not a Patch call because the version of controller-runtime in the release-0.1 branch - // does not support patching. - err := r.Get(context.Background(), request.NamespacedName, cluster) - if err != nil { - return err + klog.Infof("Cluster %s/%s: deleting child %s %s", cluster.Namespace, cluster.Name, gvk, accessor.GetName()) + if err := r.Delete(context.Background(), child); err != nil { + err = errors.Wrapf(err, "error deleting cluster %s/%s: failed to delete %s %s", cluster.Namespace, cluster.Name, gvk, accessor.GetName()) + klog.Errorf(err.Error()) + errs = append(errs, err) } + } - cluster.ObjectMeta.Finalizers = util.Filter(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) - - return r.Client.Update(context.Background(), cluster) - }) - if err != nil { - klog.Errorf("Error removing finalizer from cluster object %v; %v", name, err) - return reconcile.Result{}, errors.WithStack(err) + if len(errs) > 0 { + return reconcile.Result{}, kerrors.NewAggregate(errs) } - return reconcile.Result{}, nil + // Requeue so we can check the next time to see if there are still any children left. 
+ return reconcile.Result{RequeueAfter: deleteRequeueAfter}, nil } - klog.Infof("Reconciling cluster object %v triggers idempotent reconcile.", name) - if err := r.actuator.Reconcile(cluster); err != nil { - if requeueErr, ok := errors.Cause(err).(controllerError.HasRequeueAfterError); ok { - klog.Infof("Actuator returned requeue-after error: %v", requeueErr) - return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil + if cluster.Spec.InfrastructureRef != nil { + _, err := external.Get(r.Client, cluster.Spec.InfrastructureRef, cluster.Namespace) + switch { + case apierrors.IsNotFound(err): + // All good - the infra resource has been deleted + case err != nil: + return reconcile.Result{}, errors.Wrapf(err, "failed to get %s %q for Cluster %s/%s", + path.Join(cluster.Spec.InfrastructureRef.APIVersion, cluster.Spec.InfrastructureRef.Kind), + cluster.Spec.InfrastructureRef.Name, cluster.Namespace, cluster.Name) + default: + // The infra resource still exists. Once it's been deleted, the cluster will get processed again. + // Return here so we don't remove the finalizer yet. + return reconcile.Result{}, nil } - klog.Errorf("Error reconciling cluster object %v; %v", name, err) - return reconcile.Result{}, errors.WithStack(err) } + + cluster.Finalizers = util.Filter(cluster.Finalizers, clusterv1.ClusterFinalizer) + return reconcile.Result{}, nil } -// listChildren returns a list of Deployments, Sets, and Machines than have an ownerref to the given cluster -func (r *ReconcileCluster) listChildren(ctx context.Context, cluster *clusterv1.Cluster) ([]runtime.Object, error) { - var children []runtime.Object - - ns := cluster.GetNamespace() - opts := metav1.ListOptions{ - LabelSelector: labels.FormatLabels( - map[string]string{clusterv1.MachineClusterLabelName: cluster.GetName()}, - ), +// listChildren returns a list of MachineDeployments, MachineSets, and Machines than have an owner reference to cluster +func (r *ReconcileCluster) listChildren(ctx context.Context, cluster *v1alpha2.Cluster) ([]runtime.Object, error) { + listOptions := []client.ListOption{ + client.InNamespace(cluster.Namespace), + client.MatchingLabels(map[string]string{clusterv1.MachineClusterLabelName: cluster.Name}), } - dfunc := func(ctx context.Context, m metav1.ListOptions) (runtime.Object, error) { - return r.clusterClient.MachineDeployments(ns).List(m) + machineDeployments := &clusterv1.MachineDeploymentList{} + if err := r.Client.List(ctx, machineDeployments, listOptions...); err != nil { + return nil, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", cluster.Namespace, cluster.Name) } - sfunc := func(ctx context.Context, m metav1.ListOptions) (runtime.Object, error) { - return r.clusterClient.MachineSets(ns).List(m) + + machineSets := &clusterv1.MachineSetList{} + if err := r.Client.List(ctx, machineSets, listOptions...); err != nil { + return nil, errors.Wrapf(err, "failed to list MachineSets for cluster %s/%s", cluster.Namespace, cluster.Name) } - mfunc := func(ctx context.Context, m metav1.ListOptions) (runtime.Object, error) { - return r.clusterClient.Machines(ns).List(m) + + machines := &clusterv1.MachineList{} + if err := r.Client.List(ctx, machines, listOptions...); err != nil { + return nil, errors.Wrapf(err, "failed to list Machines for cluster %s/%s", cluster.Namespace, cluster.Name) } + var children []runtime.Object eachFunc := func(o runtime.Object) error { - acc, err := meta.Accessor(o) if err != nil { - klog.Errorf("cluster %s: couldn't create accessor for 
%T: %v", cluster.Name, o, err) + klog.Errorf("Cluster %s/%s: couldn't create accessor for type %T: %v", cluster.Namespace, cluster.Name, o, err) return nil } @@ -268,28 +261,15 @@ func (r *ReconcileCluster) listChildren(ctx context.Context, cluster *clusterv1. return nil } - deployments, err := pager.New(dfunc).List(ctx, opts) - if err != nil { - return nil, errors.Wrapf(err, "failed to list MachineDeployments in %s/%s", ns, cluster.Name) + lists := map[string]runtime.Object{ + "MachineDeployment": machineDeployments, + "MachineSet": machineSets, + "Machine": machines, } - if err := meta.EachListItem(deployments, eachFunc); err != nil { - return nil, errors.Wrapf(err, "couldn't iterate MachinesDeployments for cluster %s/%s", ns, cluster.Name) - } - - sets, err := pager.New(sfunc).List(ctx, opts) - if err != nil { - return nil, errors.Wrapf(err, "failed to list MachineSets in %s/%s", ns, cluster.Name) - } - if err := meta.EachListItem(sets, eachFunc); err != nil { - return nil, errors.Wrapf(err, "couldn't iterate MachineSets for cluster %s/%s", ns, cluster.Name) - } - - machines, err := pager.New(mfunc).List(ctx, opts) - if err != nil { - return nil, errors.Wrapf(err, "failed to list Machines in %s/%s", ns, cluster.Name) - } - if err := meta.EachListItem(machines, eachFunc); err != nil { - return nil, errors.Wrapf(err, "couldn't iterate Machines for cluster %s/%s", ns, cluster.Name) + for name, list := range lists { + if err := meta.EachListItem(list, eachFunc); err != nil { + return nil, errors.Wrapf(err, "error finding %s children of cluster %s/%s", name, cluster.Namespace, cluster.Name) + } } return children, nil diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller_phases.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller_phases.go new file mode 100644 index 0000000000..fa9aa25297 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_controller_phases.go @@ -0,0 +1,198 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/klog" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/api/v1alpha2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" + "sigs.k8s.io/cluster-api/pkg/util" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +func (r *ReconcileCluster) reconcile(ctx context.Context, cluster *v1alpha2.Cluster) (err error) { + // TODO(vincepri): These can be generalized with an interface and possibly a for loop. 
+ errors := []error{} + errors = append(errors, r.reconcileInfrastructure(ctx, cluster)) + errors = append(errors, r.reconcilePhase(ctx, cluster)) + + // Determine the return error, giving precedence to the first non-nil and non-requeueAfter errors. + for _, e := range errors { + if e == nil { + continue + } + if err == nil || capierrors.IsRequeueAfter(err) { + err = e + } + } + return err +} + +func (r *ReconcileCluster) reconcilePhase(ctx context.Context, cluster *v1alpha2.Cluster) error { + // Set the phase to "pending" if nil. + if cluster.Status.Phase == "" { + cluster.Status.SetTypedPhase(v1alpha2.ClusterPhasePending) + } + + // Set the phase to "provisioning" if the Cluster has an InfrastructureRef object associated. + if cluster.Spec.InfrastructureRef != nil { + cluster.Status.SetTypedPhase(v1alpha2.ClusterPhaseProvisioning) + } + + // Set the phase to "provisioned" if the infrastructure is ready. + if cluster.Status.InfrastructureReady { + cluster.Status.SetTypedPhase(v1alpha2.ClusterPhaseProvisioned) + } + + // Set the phase to "failed" if any of Status.ErrorReason or Status.ErrorMessage is not-nil. + if cluster.Status.ErrorReason != nil || cluster.Status.ErrorMessage != nil { + cluster.Status.SetTypedPhase(v1alpha2.ClusterPhaseFailed) + } + + // Set the phase to "deleting" if the deletion timestamp is set. + if !cluster.DeletionTimestamp.IsZero() { + cluster.Status.SetTypedPhase(v1alpha2.ClusterPhaseDeleting) + } + + return nil +} + +// reconcileExternal handles generic unstructured objects referenced by a Cluster. +func (r *ReconcileCluster) reconcileExternal(ctx context.Context, cluster *v1alpha2.Cluster, ref *corev1.ObjectReference) (*unstructured.Unstructured, error) { + obj, err := external.Get(r.Client, ref, cluster.Namespace) + if err != nil { + if apierrors.IsNotFound(err) && !cluster.DeletionTimestamp.IsZero() { + return nil, nil + } else if apierrors.IsNotFound(err) { + return nil, errors.Wrapf(&capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second}, + "could not find %v %q for Cluster %q in namespace %q, requeuing", + ref.GroupVersionKind(), ref.Name, cluster.Name, cluster.Namespace) + } + return nil, err + } + + objPatch := client.MergeFrom(obj.DeepCopy()) + + // Delete the external object if the Cluster is being deleted. + if !cluster.DeletionTimestamp.IsZero() { + if err := r.Delete(ctx, obj); err != nil { + return nil, errors.Wrapf(err, + "failed to delete %v %q for Cluster %q in namespace %q", + obj.GroupVersionKind(), ref.Name, cluster.Name, cluster.Namespace) + } + return obj, nil + } + + // Set external object OwnerReference to the Cluster. + ownerRef := metav1.OwnerReference{ + APIVersion: cluster.APIVersion, + Kind: cluster.Kind, + Name: cluster.Name, + UID: cluster.UID, + } + + if !util.HasOwnerRef(obj.GetOwnerReferences(), ownerRef) { + obj.SetOwnerReferences(util.EnsureOwnerRef(obj.GetOwnerReferences(), ownerRef)) + if err := r.Patch(ctx, obj, objPatch); err != nil { + return nil, errors.Wrapf(err, + "failed to set OwnerReference on %v %q for Cluster %q in namespace %q", + obj.GroupVersionKind(), ref.Name, cluster.Name, cluster.Namespace) + } + } + + // Add watcher for external object, if there isn't one already. 
+ _, loaded := r.externalWatchers.LoadOrStore(obj.GroupVersionKind().String(), struct{}{}) + if !loaded && r.controller != nil { + klog.Infof("Adding watcher on external object %q", obj.GroupVersionKind()) + err := r.controller.Watch( + &source.Kind{Type: obj}, + &handler.EnqueueRequestForOwner{OwnerType: &clusterv1.Cluster{}}, + ) + if err != nil { + r.externalWatchers.Delete(obj.GroupVersionKind().String()) + return nil, errors.Wrapf(err, "failed to add watcher on external object %q", obj.GroupVersionKind()) + } + } + + // Set error reason and message, if any. + errorReason, errorMessage, err := external.ErrorsFrom(obj) + if err != nil { + return nil, err + } + if errorReason != "" { + clusterStatusError := capierrors.ClusterStatusError(errorReason) + cluster.Status.ErrorReason = &clusterStatusError + } + if errorMessage != "" { + cluster.Status.ErrorMessage = pointer.StringPtr(errorMessage) + } + + return obj, nil +} + +// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Cluster. +func (r *ReconcileCluster) reconcileInfrastructure(ctx context.Context, cluster *v1alpha2.Cluster) error { + if cluster.Spec.InfrastructureRef == nil { + return nil + } + + // Call generic external reconciler. + infraConfig, err := r.reconcileExternal(ctx, cluster, cluster.Spec.InfrastructureRef) + if infraConfig == nil && err == nil { + return nil + } else if err != nil { + return err + } + + if cluster.Status.InfrastructureReady || !infraConfig.GetDeletionTimestamp().IsZero() { + return nil + } + + // Determine if the infrastructure provider is ready. + ready, err := external.IsReady(infraConfig) + if err != nil { + return err + } else if !ready { + klog.V(3).Infof("Infrastructure provider for Cluster %q in namespace %q is not ready, requeuing", cluster.Name, cluster.Namespace) + return &capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second} + } + + // Get and parse Status.APIEndpoint field from the infrastructure provider. 
+ if err := util.UnstructuredUnmarshalField(infraConfig, &cluster.Status.APIEndpoints, "status", "apiEndpoints"); err != nil { + return errors.Wrapf(err, "failed to retrieve Status.APIEndpoints from infrastructure provider for Cluster %q in namespace %q", + cluster.Name, cluster.Namespace) + } else if len(cluster.Status.APIEndpoints) == 0 { + return errors.Wrapf(err, "retrieved empty Status.APIEndpoints from infrastructure provider for Cluster %q in namespace %q", + cluster.Name, cluster.Namespace) + } + + cluster.Status.InfrastructureReady = true + return nil +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_suite_test.go index f7c248ae91..fc380606b6 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_suite_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_suite_test.go @@ -24,19 +24,18 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var cfg *rest.Config func TestMain(m *testing.M) { t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, } - apis.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) var err error if cfg, err = t.Start(); err != nil { @@ -48,18 +47,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. -func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { - requests := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - return result, err - }) - return fn, requests -} - // StartTestManager adds recFn func StartTestManager(mgr manager.Manager, t *testing.T) chan struct{} { t.Helper() diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go index dd956e7eeb..d93423b320 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/cluster_reconciler_test.go @@ -20,62 +20,49 @@ import ( "testing" "time" + . 
"github.com/onsi/gomega" "golang.org/x/net/context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var c client.Client - -var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} - const timeout = time.Second * 5 func TestReconcile(t *testing.T) { - instance := &clusterv1alpha1.Cluster{ + RegisterTestingT(t) + ctx := context.TODO() + + instance := &clusterv1alpha2.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: clusterv1alpha1.ClusterSpec{ - ClusterNetwork: clusterv1alpha1.ClusterNetworkingConfig{ - Services: clusterv1alpha1.NetworkRanges{CIDRBlocks: []string{"10.96.0.0/12"}}, - Pods: clusterv1alpha1.NetworkRanges{CIDRBlocks: []string{"192.168.0.0/16"}}, - }, - }, + Spec: clusterv1alpha2.ClusterSpec{}, } // Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a // channel when it is finished. - mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } - c = mgr.GetClient() + c := mgr.GetClient() - a := newTestActuator() - r, err := newReconciler(mgr, a) - if err != nil { - t.Fatalf("Couldn't create controller: %v", err) - } - recFn, requests := SetupTestReconcile(r) - if err := add(mgr, recFn); err != nil { - t.Fatalf("error adding controller to manager: %v", err) - } + reconciler := newReconciler(mgr) + controller, err := addController(mgr, reconciler) + Expect(err).To(BeNil()) + reconciler.controller = controller defer close(StartTestManager(mgr, t)) // Create the Cluster object and expect the Reconcile and Deployment to be created - if err := c.Create(context.TODO(), instance); err != nil { - t.Fatalf("error creating instance: %v", err) - } - defer c.Delete(context.TODO(), instance) - select { - case recv := <-requests: - if recv != expectedRequest { - t.Error("received request does not match expected request") + Expect(c.Create(ctx, instance)).To(BeNil()) + defer c.Delete(ctx, instance) + + // Make sure the Cluster exists. + Eventually(func() bool { + key := client.ObjectKey{Namespace: instance.Namespace, Name: instance.Name} + if err := c.Get(ctx, key, instance); err != nil { + return false } - case <-time.After(timeout): - t.Error("timed out waiting for request") - } + return true + }, timeout).Should(BeTrue()) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/testactuator.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/testactuator.go deleted file mode 100644 index 5de8a13b98..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/cluster/testactuator.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "sync" - - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -type TestActuator struct { - unblock chan string - BlockOnReconcile bool - BlockOnDelete bool - ReconcileCallCount int64 - DeleteCallCount int64 - Lock sync.Mutex -} - -func (a *TestActuator) Reconcile(*v1alpha1.Cluster) error { - defer func() { - if a.BlockOnReconcile { - <-a.unblock - } - }() - - a.Lock.Lock() - defer a.Lock.Unlock() - a.ReconcileCallCount++ - return nil -} - -func (a *TestActuator) Delete(*v1alpha1.Cluster) error { - defer func() { - if a.BlockOnDelete { - <-a.unblock - } - }() - - a.Lock.Lock() - defer a.Lock.Unlock() - a.DeleteCallCount++ - return nil -} - -func newTestActuator() *TestActuator { - ta := new(TestActuator) - ta.unblock = make(chan string) - return ta -} - -func (a *TestActuator) Unblock() { - close(a.unblock) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/controller.go index c71a824e82..2777fcc007 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/controller.go @@ -20,11 +20,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" ) +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status;machinedeployments;machinedeployments/status;machinesets;machinesets/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete + // AddToManagerFuncs is a list of functions to add all Controllers to the Manager var AddToManagerFuncs []func(manager.Manager) error // AddToManager adds all Controllers to the Manager -// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch func AddToManager(m manager.Manager) error { for _, f := range AddToManagerFuncs { if err := f(m); err != nil { diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/error/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/error/BUILD.bazel deleted file mode 100644 index db3ab41550..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/error/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["requeue_error.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/controller/error", - visibility = ["//visibility:public"], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/BUILD.bazel new file mode 100644 index 0000000000..6b018643a6 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "testing.go", + "util.go", + ], + importpath = "sigs.k8s.io/cluster-api/pkg/controller/external", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/pkg/errors:go_default_library", + 
"//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", + ], +) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/testing.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/testing.go new file mode 100644 index 0000000000..113d3b603e --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/testing.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package external + +import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + TestGenericInfrastructureCRD = &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "genericmachines.infrastructure.cluster.x-k8s.io", + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: "infrastructure.cluster.x-k8s.io", + Scope: apiextensionsv1beta1.NamespaceScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Kind: "InfrastructureMachine", + Plural: "genericmachines", + }, + Subresources: &apiextensionsv1beta1.CustomResourceSubresources{ + Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{}, + }, + Validation: &apiextensionsv1beta1.CustomResourceValidation{}, + Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha2", + Served: true, + Storage: true, + }, + }, + }, + } + + TestGenericInfrastructureTemplateCRD = &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "genericmachinetemplates.infrastructure.cluster.x-k8s.io", + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: "infrastructure.cluster.x-k8s.io", + Scope: apiextensionsv1beta1.NamespaceScoped, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Kind: "InfrastructureMachineTemplate", + Plural: "genericmachinetemplates", + }, + Subresources: &apiextensionsv1beta1.CustomResourceSubresources{ + Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{}, + }, + Validation: &apiextensionsv1beta1.CustomResourceValidation{}, + Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha2", + Served: true, + Storage: true, + }, + }, + }, + } +) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/util.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/util.go new file mode 100644 index 0000000000..312a23b01c --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/external/util.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package external + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Get uses the client and reference to get an external, unstructured object. +func Get(c client.Client, ref *corev1.ObjectReference, namespace string) (*unstructured.Unstructured, error) { + obj := new(unstructured.Unstructured) + obj.SetAPIVersion(ref.APIVersion) + obj.SetKind(ref.Kind) + obj.SetName(ref.Name) + key := client.ObjectKey{Name: obj.GetName(), Namespace: namespace} + if err := c.Get(context.Background(), key, obj); err != nil { + return nil, err + } + return obj, nil +} + +// CloneTemplate uses the client and the reference to create a new object from the template. +func CloneTemplate(c client.Client, ref *corev1.ObjectReference, namespace string) (*unstructured.Unstructured, error) { + from, err := Get(c, ref, namespace) + if err != nil { + return nil, err + } + template, found, err := unstructured.NestedMap(from.Object, "spec", "template") + if !found { + return nil, errors.Errorf("missing Spec.Template on %v %q", from.GroupVersionKind(), from.GetName()) + } else if err != nil { + return nil, errors.Wrapf(err, "failed to retrieve Spec.Template map on %v %q", from.GroupVersionKind(), from.GetName()) + } + to := &unstructured.Unstructured{Object: template} + to.SetResourceVersion("") + to.SetOwnerReferences(nil) + to.SetFinalizers(nil) + to.SetUID("") + to.SetSelfLink("") + to.SetName("") + to.SetGenerateName(fmt.Sprintf("%s-", from.GetName())) + to.SetNamespace(namespace) + if err := c.Create(context.Background(), to); err != nil { + return nil, err + } + return to, nil +} + +// ErrorsFrom returns the ErrorReason and ErrorMessage fields from the external object status. +func ErrorsFrom(obj *unstructured.Unstructured) (string, string, error) { + errorReason, _, err := unstructured.NestedString(obj.Object, "status", "errorReason") + if err != nil { + return "", "", errors.Wrapf(err, "failed to determine errorReason on %v %q", + obj.GroupVersionKind(), obj.GetName()) + } + errorMessage, _, err := unstructured.NestedString(obj.Object, "status", "errorMessage") + if err != nil { + return "", "", errors.Wrapf(err, "failed to determine errorMessage on %v %q", + obj.GroupVersionKind(), obj.GetName()) + } + return errorReason, errorMessage, nil +} + +// IsReady returns true if the Status.Ready field on an external object is true. 
+func IsReady(obj *unstructured.Unstructured) (bool, error) { + ready, found, err := unstructured.NestedBool(obj.Object, "status", "ready") + if err != nil { + return false, errors.Wrapf(err, "failed to determine %v %q readiness", + obj.GroupVersionKind(), obj.GetName()) + } + return ready && found, nil +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/BUILD.bazel index 38e7392834..d819fdce7b 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/BUILD.bazel @@ -3,23 +3,29 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "actuator.go", "machine_controller.go", - "testactuator.go", + "machine_controller_noderef.go", + "machine_controller_phases.go", ], importpath = "sigs.k8s.io/cluster-api/pkg/controller/machine", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/controller/error:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/controller/external:go_default_library", + "//pkg/controller/noderefutil:go_default_library", "//pkg/controller/remote:go_default_library", + "//pkg/errors:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/controller:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", @@ -32,19 +38,29 @@ go_library( go_test( name = "go_default_test", srcs = [ + "machine_controller_noderef_test.go", + "machine_controller_phases_test.go", "machine_controller_test.go", "machine_reconciler_suite_test.go", "machine_reconciler_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/controller/noderefutil:go_default_library", + "//pkg/errors:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", 
"//vendor/sigs.k8s.io/controller-runtime/pkg/client/fake:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/actuator.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/actuator.go deleted file mode 100644 index fb07683b39..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/actuator.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machine - -import ( - "context" - - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -/// [Actuator] -// Actuator controls machines on a specific infrastructure. All -// methods should be idempotent unless otherwise specified. -type Actuator interface { - // Create the machine. - Create(context.Context, *clusterv1.Cluster, *clusterv1.Machine) error - // Delete the machine. If no error is returned, it is assumed that all dependent resources have been cleaned up. - Delete(context.Context, *clusterv1.Cluster, *clusterv1.Machine) error - // Update the machine to the provided definition. - Update(context.Context, *clusterv1.Cluster, *clusterv1.Machine) error - // Checks if the machine currently exists. - Exists(context.Context, *clusterv1.Cluster, *clusterv1.Machine) (bool, error) -} - -/// [Actuator] diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go index 2a7b7bc573..53dd96b5e8 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller.go @@ -18,18 +18,22 @@ package machine import ( "context" - "os" + "path" + "sync" + "time" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - controllerError "sigs.k8s.io/cluster-api/pkg/controller/error" + "sigs.k8s.io/cluster-api/api/v1alpha2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" "sigs.k8s.io/cluster-api/pkg/controller/remote" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -39,63 +43,58 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -const ( - NodeNameEnvVar = "NODE_NAME" -) - -var DefaultActuator Actuator +const controllerName = "machine-controller" -func AddWithActuator(mgr manager.Manager, actuator Actuator) error { - return add(mgr, newReconciler(mgr, actuator)) +// Add creates a new Machine Controller and adds it to the Manager with default RBAC. 
The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(mgr manager.Manager) error { + r := newReconciler(mgr) + c, err := addController(mgr, r) + r.controller = c + return err } // newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, actuator Actuator) reconcile.Reconciler { - r := &ReconcileMachine{ +func newReconciler(mgr manager.Manager) *ReconcileMachine { + return &ReconcileMachine{ Client: mgr.GetClient(), scheme: mgr.GetScheme(), - nodeName: os.Getenv(NodeNameEnvVar), - actuator: actuator, - } - - if r.nodeName == "" { - klog.Warningf("Environment variable %q is not set, this controller will not protect against deleting its own machine", NodeNameEnvVar) + recorder: mgr.GetEventRecorderFor(controllerName), } - - return r } -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { +// addController adds a new Controller to mgr with r as the reconcile.Reconciler +func addController(mgr manager.Manager, r reconcile.Reconciler) (controller.Controller, error) { // Create a new controller - c, err := controller.New("machine-controller", mgr, controller.Options{Reconciler: r}) + c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) if err != nil { - return err + return nil, err } // Watch for changes to Machine - return c.Watch( + err = c.Watch( &source.Kind{Type: &clusterv1.Machine{}}, &handler.EnqueueRequestForObject{}, ) + if err != nil { + return nil, err + } + + return c, nil } // ReconcileMachine reconciles a Machine object type ReconcileMachine struct { client.Client - scheme *runtime.Scheme - - actuator Actuator - - // nodeName is the name of the node on which the machine controller is running, if not present, it is loaded from NODE_NAME. - nodeName string + scheme *runtime.Scheme + controller controller.Controller + recorder record.EventRecorder + externalWatchers sync.Map } // Reconcile reads that state of the cluster for a Machine object and makes changes based on the state read // and what is in the Machine.Spec -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machines;machines/status,verbs=get;list;watch;create;update;patch;delete -func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // TODO(mvladev): Can context be passed from Kubebuilder? +func (r *ReconcileMachine) Reconcile(request reconcile.Request) (_ reconcile.Result, reterr error) { ctx := context.TODO() // Fetch the Machine instance @@ -111,62 +110,72 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul return reconcile.Result{}, err } - // Implement controller logic here - name := m.Name - klog.Infof("Reconciling Machine %q", name) + // Store Machine early state to allow patching. + patchMachine := client.MergeFrom(m.DeepCopy()) + + // Always issue a Patch for the Machine object and its status after each reconciliation. + // TODO(vincepri): Figure out if we should bubble up the errors from Patch to the controller. + defer func() { + gvk := m.GroupVersionKind() + if err := r.Client.Patch(ctx, m, patchMachine); err != nil { + klog.Errorf("Error Patching Machine %q in namespace %q: %v", m.Name, m.Namespace, err) + if reterr == nil { + reterr = err + } + return + } + // TODO(vincepri): This is a hack because after a Patch, the object loses TypeMeta information. + // Remove when https://github.com/kubernetes-sigs/controller-runtime/issues/526 is fixed. 
+ m.SetGroupVersionKind(gvk) + if err := r.Client.Status().Patch(ctx, m, patchMachine); err != nil { + klog.Errorf("Error Patching Machine status %q in namespace %q: %v", m.Name, m.Namespace, err) + if reterr == nil { + reterr = err + } + } + }() // Cluster might be nil as some providers might not require a cluster object // for machine management. - cluster, err := r.getCluster(ctx, m) - if err != nil { - return reconcile.Result{}, err + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, m.ObjectMeta) + if errors.Cause(err) == util.ErrNoCluster { + klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", + m.Name, m.Namespace, clusterv1.MachineClusterLabelName) + } else if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to get cluster %q for machine %q in namespace %q", + m.Labels[clusterv1.MachineClusterLabelName], m.Name, m.Namespace) } - // Set the ownerRef with foreground deletion if there is a linked cluster. if cluster != nil && shouldAdopt(m) { - klog.Infof("Cluster %s/%s is adopting Machine %s", cluster.Namespace, cluster.Name, m.Name) - blockOwnerDeletion := true - m.OwnerReferences = append(m.OwnerReferences, metav1.OwnerReference{ - APIVersion: cluster.APIVersion, - Kind: cluster.Kind, - Name: cluster.Name, - UID: cluster.UID, - BlockOwnerDeletion: &blockOwnerDeletion, + m.OwnerReferences = util.EnsureOwnerRef(m.OwnerReferences, metav1.OwnerReference{ + APIVersion: cluster.APIVersion, + Kind: cluster.Kind, + Name: cluster.Name, + UID: cluster.UID, }) } - if reconcileFinalizers(m, cluster) { - if err := r.Client.Update(ctx, m); err != nil { - klog.Infof("Failed to add finalizers to machine %q: %v", name, err) - return reconcile.Result{}, err + // If the Machine hasn't been deleted and doesn't have a finalizer, add one. + if m.ObjectMeta.DeletionTimestamp.IsZero() { + if !util.Contains(m.Finalizers, clusterv1.MachineFinalizer) { + m.Finalizers = append(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) + if err := r.Client.Patch(ctx, m, patchMachine); err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to add finalizer to Machine %q in namespace %q", m.Name, m.Namespace) + } + // Since adding the finalizer updates the object return to avoid later update issues + return reconcile.Result{Requeue: true}, nil } - // Since adding the finalizer updates the object return to avoid later update issues - return reconcile.Result{Requeue: true}, nil } - if !m.ObjectMeta.DeletionTimestamp.IsZero() { - // no-op if finalizer has been removed. - if !util.Contains(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) { - klog.Infof("Reconciling machine %q causes a no-op as there is no finalizer", name) - return reconcile.Result{}, nil - } - - if !r.isDeleteAllowed(m) { - klog.Infof("Deleting machine hosting this controller is not allowed. 
Skipping reconciliation of machine %q", name) - return reconcile.Result{}, nil - } - - klog.Infof("Reconciling machine %q triggers delete", name) - if err := r.actuator.Delete(ctx, cluster, m); err != nil { - if requeueErr, ok := errors.Cause(err).(controllerError.HasRequeueAfterError); ok { - klog.Infof("Actuator returned requeue-after error: %v", requeueErr) - return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil - } - - klog.Errorf("Failed to delete machine %q: %v", name, err) - return reconcile.Result{}, err + if err := r.reconcile(ctx, cluster, m); err != nil { + if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok { + klog.Infof("Reconciliation for Machine %q in namespace %q asked to requeue: %v", m.Name, m.Namespace, err) + return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil } + return reconcile.Result{}, err + } + if !m.ObjectMeta.DeletionTimestamp.IsZero() { if err := r.isDeleteNodeAllowed(context.Background(), m); err != nil { switch err { case errNilNodeRef: @@ -174,130 +183,31 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul case errNoControlPlaneNodes, errLastControlPlaneNode: klog.V(2).Infof("Deleting node %q is not allowed for machine %q: %v", m.Status.NodeRef.Name, m.Name, err) default: - klog.Errorf("IsDeleteNodeAllowed check failed for machine %q: %v", name, err) + klog.Errorf("IsDeleteNodeAllowed check failed for machine %q: %v", m.Name, err) return reconcile.Result{}, err } } else { klog.Infof("Deleting node %q for machine %q", m.Status.NodeRef.Name, m.Name) if err := r.deleteNode(ctx, cluster, m.Status.NodeRef.Name); err != nil && !apierrors.IsNotFound(err) { - klog.Errorf("Error deleting node %q for machine %q: %v", m.Status.NodeRef.Name, name, err) + klog.Errorf("Error deleting node %q for machine %q: %v", m.Status.NodeRef.Name, m.Name, err) return reconcile.Result{}, err } } - // Remove finalizer on successful deletion. - m.ObjectMeta.Finalizers = util.Filter(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) - if err := r.Client.Update(context.Background(), m); err != nil { - klog.Errorf("Failed to remove finalizer from machine %q: %v", name, err) - return reconcile.Result{}, err - } - - klog.Infof("Machine %q deletion successful", name) - return reconcile.Result{}, nil - } - - exist, err := r.actuator.Exists(ctx, cluster, m) - if err != nil { - klog.Errorf("Failed to check if machine %q exists: %v", name, err) - return reconcile.Result{}, err - } - - if exist { - klog.Infof("Reconciling machine %q triggers idempotent update", name) - if err := r.actuator.Update(ctx, cluster, m); err != nil { - if requeueErr, ok := errors.Cause(err).(controllerError.HasRequeueAfterError); ok { - klog.Infof("Actuator returned requeue-after error: %v", requeueErr) + if err := r.isDeleteReady(ctx, m); err != nil { + if requeueErr, ok := errors.Cause(err).(capierrors.HasRequeueAfterError); ok { + klog.Infof("Reconciliation for Machine %q in namespace %q asked to requeue: %v", m.Name, m.Namespace, err) return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil } - - klog.Errorf(`Error updating machine "%s/%s": %v`, m.Namespace, name, err) return reconcile.Result{}, err } - return reconcile.Result{}, nil - } - - // Machine resource created. Machine does not yet exist. 
- klog.Infof("Reconciling machine object %v triggers idempotent create.", m.ObjectMeta.Name) - if err := r.actuator.Create(ctx, cluster, m); err != nil { - if requeueErr, ok := errors.Cause(err).(controllerError.HasRequeueAfterError); ok { - klog.Infof("Actuator returned requeue-after error: %v", requeueErr) - return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.GetRequeueAfter()}, nil - } - - klog.Warningf("Failed to create machine %q: %v", name, err) - return reconcile.Result{}, err + m.ObjectMeta.Finalizers = util.Filter(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) } return reconcile.Result{}, nil } -// reconcileFinalizers appends any missing finalizers to the machine -// and returns true if the api server needs to be updated. -func reconcileFinalizers(m *clusterv1.Machine, cluster *clusterv1.Cluster) bool { - // If object hasn't been deleted and doesn't have a finalizer, add one - // Add a finalizer to newly created objects. - if m.ObjectMeta.DeletionTimestamp.IsZero() { - finalizerCount := len(m.Finalizers) - - if cluster != nil && !util.Contains(m.Finalizers, metav1.FinalizerDeleteDependents) { - m.Finalizers = append(m.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) - } - - if !util.Contains(m.Finalizers, clusterv1.MachineFinalizer) { - m.Finalizers = append(m.ObjectMeta.Finalizers, clusterv1.MachineFinalizer) - } - - if len(m.Finalizers) > finalizerCount { - return true - } - } - return false -} - -func (r *ReconcileMachine) getCluster(ctx context.Context, machine *clusterv1.Machine) (*clusterv1.Cluster, error) { - if machine.Labels[clusterv1.MachineClusterLabelName] == "" { - klog.Infof("Machine %q in namespace %q doesn't specify %q label, assuming nil cluster", machine.Name, machine.Namespace, clusterv1.MachineClusterLabelName) - return nil, nil - } - - cluster := &clusterv1.Cluster{} - key := client.ObjectKey{ - Namespace: machine.Namespace, - Name: machine.Labels[clusterv1.MachineClusterLabelName], - } - - if err := r.Client.Get(ctx, key, cluster); err != nil { - return nil, err - } - - return cluster, nil -} - -// isDeletedAllowed returns false if the Machine we're trying to delete is the -// Machine hosting this controller. This method is meant to be functional -// only when the controllers are running in the workload cluster. -func (r *ReconcileMachine) isDeleteAllowed(machine *clusterv1.Machine) bool { - if r.nodeName == "" || machine.Status.NodeRef == nil { - return true - } - - if machine.Status.NodeRef.Name != r.nodeName { - return true - } - - node := &corev1.Node{} - if err := r.Client.Get(context.Background(), client.ObjectKey{Name: r.nodeName}, node); err != nil { - klog.Infof("Failed to determine if controller's node %q is associated with machine %q: %v", r.nodeName, machine.Name, err) - return true - } - - // When the UID of the machine's node reference and this controller's actual node match then then the request is to - // delete the machine this machine-controller is running on. Return false to not allow machine controller to delete its - // own machine. 
- return node.UID != machine.Status.NodeRef.UID -} - var ( errNilNodeRef = errors.New("noderef is nil") errLastControlPlaneNode = errors.New("last control plane member") @@ -373,9 +283,8 @@ func (r *ReconcileMachine) getMachinesInCluster(ctx context.Context, namespace, machineList := &clusterv1.MachineList{} labels := map[string]string{clusterv1.MachineClusterLabelName: name} - listOptions := client.InNamespace(namespace).MatchingLabels(labels) - if err := r.Client.List(ctx, listOptions, machineList); err != nil { + if err := r.Client.List(ctx, machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrap(err, "failed to list machines") } @@ -387,6 +296,32 @@ func (r *ReconcileMachine) getMachinesInCluster(ctx context.Context, namespace, return machines, nil } -func shouldAdopt(m *v1alpha1.Machine) bool { - return !util.HasOwner(m.OwnerReferences, v1alpha1.SchemeGroupVersion.String(), []string{"MachineSet", "MachineDeployment", "Cluster"}) +// isDeleteReady returns an error if any of Boostrap.ConfigRef or InfrastructureRef referenced objects still exists. +func (r *ReconcileMachine) isDeleteReady(ctx context.Context, m *v1alpha2.Machine) error { + if m.Spec.Bootstrap.ConfigRef != nil { + _, err := external.Get(r.Client, m.Spec.Bootstrap.ConfigRef, m.Namespace) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return errors.Wrapf(err, "failed to get %s %q for Machine %q in namespace %q", + path.Join(m.Spec.Bootstrap.ConfigRef.APIVersion, m.Spec.Bootstrap.ConfigRef.Kind), + m.Spec.Bootstrap.ConfigRef.Name, m.Name, m.Namespace) + } + return &capierrors.RequeueAfterError{RequeueAfter: 10 * time.Second} + } + + if _, err := external.Get(r.Client, &m.Spec.InfrastructureRef, m.Namespace); err != nil && !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to get %s %q for Machine %q in namespace %q", + path.Join(m.Spec.InfrastructureRef.APIVersion, m.Spec.InfrastructureRef.Kind), + m.Spec.InfrastructureRef.Name, m.Name, m.Namespace) + } else if err == nil { + return &capierrors.RequeueAfterError{RequeueAfter: 10 * time.Second} + } + + return nil +} + +func shouldAdopt(m *v1alpha2.Machine) bool { + return !util.HasOwner(m.OwnerReferences, v1alpha2.GroupVersion.String(), []string{"MachineSet", "Cluster"}) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef.go new file mode 100644 index 0000000000..267c9948ed --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef.go @@ -0,0 +1,129 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package machine + +import ( + "context" + "errors" + "time" + + apicorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/klog" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" + "sigs.k8s.io/cluster-api/pkg/controller/remote" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" +) + +var ( + ErrNodeNotFound = errors.New("cannot find node with matching ProviderID") +) + +func (r *ReconcileMachine) reconcileNodeRef(ctx context.Context, cluster *v1alpha2.Cluster, machine *v1alpha2.Machine) error { + // Check that the Machine hasn't been deleted or in the process. + if !machine.DeletionTimestamp.IsZero() { + return nil + } + + // Check that the Machine doesn't already have a NodeRef. + if machine.Status.NodeRef != nil { + return nil + } + + // Check that Cluster isn't nil. + if cluster == nil { + klog.Warningf("Machine %q in namespace %q doesn't have a linked cluster, won't assign NodeRef", machine.Name, machine.Namespace) + return nil + } + + // Check that the Machine has a valid ProviderID. + if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { + klog.Warningf("Machine %q in namespace %q doesn't have a valid ProviderID, retrying later", machine.Name, machine.Namespace) + return &capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second} + } + + providerID, err := noderefutil.NewProviderID(*machine.Spec.ProviderID) + if err != nil { + return err + } + + clusterClient, err := remote.NewClusterClient(r.Client, cluster) + if err != nil { + return err + } + + corev1Client, err := clusterClient.CoreV1() + if err != nil { + return err + } + + // Get the Node reference. + nodeRef, err := r.getNodeReference(corev1Client, providerID) + if err != nil { + if err == ErrNodeNotFound { + klog.V(2).Infof("Cannot assign NodeRef to Machine %q in namespace %q: cannot find a matching Node, retrying later", + machine.Name, machine.Namespace) + return &capierrors.RequeueAfterError{RequeueAfter: 10 * time.Second} + } + klog.Errorf("Failed to assign NodeRef to Machine %q in namespace %q: %v", machine.Name, machine.Namespace, err) + r.recorder.Event(machine, apicorev1.EventTypeWarning, "FailedSetNodeRef", err.Error()) + return err + } + + // Set the Machine NodeRef. 
+ machine.Status.NodeRef = nodeRef + klog.Infof("Set Machine's (%q in namespace %q) NodeRef to %q", machine.Name, machine.Namespace, machine.Status.NodeRef.Name) + r.recorder.Event(machine, apicorev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name) + return nil +} + +func (r *ReconcileMachine) getNodeReference(client corev1.NodesGetter, providerID *noderefutil.ProviderID) (*apicorev1.ObjectReference, error) { + listOpt := metav1.ListOptions{} + + for { + nodeList, err := client.Nodes().List(listOpt) + if err != nil { + return nil, err + } + + for _, node := range nodeList.Items { + nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID) + if err != nil { + klog.V(3).Infof("Failed to parse ProviderID for Node %q: %v", node.Name, err) + continue + } + + if providerID.Equals(nodeProviderID) { + return &apicorev1.ObjectReference{ + Kind: node.Kind, + APIVersion: node.APIVersion, + Name: node.Name, + UID: node.UID, + }, nil + } + } + + listOpt.Continue = nodeList.Continue + if listOpt.Continue == "" { + break + } + } + + return nil, ErrNodeNotFound +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef_test.go similarity index 64% rename from vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_test.go rename to vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef_test.go index 5bb3a49016..effc03bed3 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_noderef_test.go @@ -14,78 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package noderef +package machine import ( "strings" "testing" - "time" - "github.com/onsi/gomega" - "golang.org/x/net/context" corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" fakeclient "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var c client.Client - -var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} - -const timeout = time.Second * 5 - -func TestReconcile(t *testing.T) { - g := gomega.NewGomegaWithT(t) - instance := &v1alpha1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - } - - // Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a - // channel when it is finished. 
- mgr, err := manager.New(cfg, manager.Options{}) - g.Expect(err).NotTo(gomega.HaveOccurred()) - c = mgr.GetClient() - - recFn, requests := SetupTestReconcile(newReconciler(mgr)) - g.Expect(add(mgr, recFn)).NotTo(gomega.HaveOccurred()) - - stopMgr, mgrStopped := StartTestManager(mgr, g) - - defer func() { - close(stopMgr) - mgrStopped.Wait() - }() - - // Create the Machine object and expect the Reconcile - err = c.Create(context.TODO(), instance) - // The instance object may not be a valid object because it might be missing some required fields. - // Please modify the instance object by adding required fields and then remove the following if statement. - if apierrors.IsInvalid(err) { - t.Logf("failed to create object, got an invalid object error: %v", err) - return - } - g.Expect(err).NotTo(gomega.HaveOccurred()) - defer c.Delete(context.TODO(), instance) - g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest))) -} - func TestGetNodeReference(t *testing.T) { - v1alpha1.AddToScheme(scheme.Scheme) - r := &ReconcileNodeRef{ + v1alpha2.AddToScheme(scheme.Scheme) + r := &ReconcileMachine{ Client: fake.NewFakeClient(), scheme: scheme.Scheme, recorder: record.NewFakeRecorder(32), diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases.go new file mode 100644 index 0000000000..df8ef8e646 --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases.go @@ -0,0 +1,288 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machine + +import ( + "context" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/klog" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/api/v1alpha2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" + "sigs.k8s.io/cluster-api/pkg/util" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +func (r *ReconcileMachine) reconcile(ctx context.Context, cluster *v1alpha2.Cluster, m *v1alpha2.Machine) (err error) { + // TODO(vincepri): These can be generalized with an interface and possibly a for loop. + errors := []error{} + errors = append(errors, r.reconcileBootstrap(ctx, m)) + errors = append(errors, r.reconcileInfrastructure(ctx, m)) + errors = append(errors, r.reconcileNodeRef(ctx, cluster, m)) + errors = append(errors, r.reconcilePhase(ctx, m)) + errors = append(errors, r.reconcileClusterAnnotations(ctx, cluster, m)) + + // Determine the return error, giving precedence to the first non-nil and non-requeueAfter errors. 
+ for _, e := range errors { + if e == nil { + continue + } + if err == nil || capierrors.IsRequeueAfter(err) { + err = e + } + } + return err +} + +func (r *ReconcileMachine) reconcilePhase(ctx context.Context, m *v1alpha2.Machine) error { + // Set the phase to "pending" if nil. + if m.Status.Phase == "" { + m.Status.SetTypedPhase(v1alpha2.MachinePhasePending) + } + + // Set the phase to "provisioning" if bootstrap is ready and the infrastructure isn't. + if m.Status.BootstrapReady && !m.Status.InfrastructureReady { + m.Status.SetTypedPhase(v1alpha2.MachinePhaseProvisioning) + } + + // Set the phase to "provisioned" if the infrastructure is ready. + if m.Status.InfrastructureReady { + m.Status.SetTypedPhase(v1alpha2.MachinePhaseProvisioned) + } + + // Set the phase to "running" if there is a NodeRef field. + if m.Status.NodeRef != nil && m.Status.InfrastructureReady { + m.Status.SetTypedPhase(v1alpha2.MachinePhaseRunning) + } + + // Set the phase to "failed" if any of Status.ErrorReason or Status.ErrorMessage is not-nil. + if m.Status.ErrorReason != nil || m.Status.ErrorMessage != nil { + m.Status.SetTypedPhase(v1alpha2.MachinePhaseFailed) + } + + // Set the phase to "deleting" if the deletion timestamp is set. + if !m.DeletionTimestamp.IsZero() { + m.Status.SetTypedPhase(v1alpha2.MachinePhaseDeleting) + } + + return nil +} + +// reconcileExternal handles generic unstructured objects referenced by a Machine. +func (r *ReconcileMachine) reconcileExternal(ctx context.Context, m *v1alpha2.Machine, ref *corev1.ObjectReference) (*unstructured.Unstructured, error) { + obj, err := external.Get(r.Client, ref, m.Namespace) + if err != nil { + if apierrors.IsNotFound(err) && !m.DeletionTimestamp.IsZero() { + return nil, nil + } else if apierrors.IsNotFound(err) { + return nil, errors.Wrapf(&capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second}, + "could not find %v %q for Machine %q in namespace %q, requeuing", + ref.GroupVersionKind(), ref.Name, m.Name, m.Namespace) + } + return nil, err + } + + objPatch := client.MergeFrom(obj.DeepCopy()) + + // Delete the external object if the Machine is being deleted. + if !m.DeletionTimestamp.IsZero() { + if err := r.Delete(ctx, obj); err != nil { + return nil, errors.Wrapf(err, + "failed to delete %v %q for Machine %q in namespace %q", + obj.GroupVersionKind(), ref.Name, m.Name, m.Namespace) + } + return obj, nil + } + + // Set external object OwnerReference to the Machine. + machineOwnerRef := metav1.OwnerReference{ + APIVersion: m.APIVersion, + Kind: m.Kind, + Name: m.Name, + UID: m.UID, + } + + if !util.HasOwnerRef(obj.GetOwnerReferences(), machineOwnerRef) { + obj.SetOwnerReferences(util.EnsureOwnerRef(obj.GetOwnerReferences(), machineOwnerRef)) + if err := r.Patch(ctx, obj, objPatch); err != nil { + return nil, errors.Wrapf(err, + "failed to set OwnerReference on %v %q for Machine %q in namespace %q", + obj.GroupVersionKind(), ref.Name, m.Name, m.Namespace) + } + } + + // Add watcher for external object, if there isn't one already. 
+ _, loaded := r.externalWatchers.LoadOrStore(obj.GroupVersionKind().String(), struct{}{}) + if !loaded && r.controller != nil { + klog.Infof("Adding watcher on external object %q", obj.GroupVersionKind()) + err := r.controller.Watch( + &source.Kind{Type: obj}, + &handler.EnqueueRequestForOwner{OwnerType: &clusterv1.Machine{}}, + ) + if err != nil { + r.externalWatchers.Delete(obj.GroupVersionKind().String()) + return nil, errors.Wrapf(err, "failed to add watcher on external object %q", obj.GroupVersionKind()) + } + } + + // Set error reason and message, if any. + errorReason, errorMessage, err := external.ErrorsFrom(obj) + if err != nil { + return nil, err + } + if errorReason != "" { + machineStatusError := capierrors.MachineStatusError(errorReason) + m.Status.ErrorReason = &machineStatusError + } + if errorMessage != "" { + m.Status.ErrorMessage = pointer.StringPtr(errorMessage) + } + + return obj, nil +} + +// reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a Machine. +func (r *ReconcileMachine) reconcileBootstrap(ctx context.Context, m *v1alpha2.Machine) error { + // TODO(vincepri): Move this validation in kubebuilder / webhook. + if m.Spec.Bootstrap.ConfigRef == nil && m.Spec.Bootstrap.Data == nil { + return errors.Errorf( + "Expected at least one of `Bootstrap.ConfigRef` or `Bootstrap.Data` to be populated for Machine %q in namespace %q", + m.Name, m.Namespace, + ) + } + + if m.Spec.Bootstrap.Data != nil { + m.Status.BootstrapReady = true + return nil + } + + // Call generic external reconciler. + bootstrapConfig, err := r.reconcileExternal(ctx, m, m.Spec.Bootstrap.ConfigRef) + if bootstrapConfig == nil && err == nil { + m.Status.BootstrapReady = false + return nil + } else if err != nil { + return err + } + + // If the bootstrap config is being deleted, return early. + if !bootstrapConfig.GetDeletionTimestamp().IsZero() { + return nil + } + + // Determine if the bootstrap provider is ready. + ready, err := external.IsReady(bootstrapConfig) + if err != nil { + return err + } else if !ready { + klog.V(3).Infof("Bootstrap provider for Machine %q in namespace %q is not ready, requeuing", m.Name, m.Namespace) + return &capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second} + } + + // Get and set data from the bootstrap provider. + data, _, err := unstructured.NestedString(bootstrapConfig.Object, "status", "bootstrapData") + if err != nil { + return errors.Wrapf(err, "failed to retrieve data from bootstrap provider for Machine %q in namespace %q", m.Name, m.Namespace) + } else if data == "" { + return errors.Errorf("retrieved empty data from bootstrap provider for Machine %q in namespace %q", m.Name, m.Namespace) + } + + m.Spec.Bootstrap.Data = pointer.StringPtr(data) + m.Status.BootstrapReady = true + return nil +} + +// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Machine. +func (r *ReconcileMachine) reconcileInfrastructure(ctx context.Context, m *v1alpha2.Machine) error { + // Call generic external reconciler. + infraConfig, err := r.reconcileExternal(ctx, m, &m.Spec.InfrastructureRef) + if infraConfig == nil && err == nil { + return nil + } else if err != nil { + return err + } + + if m.Status.InfrastructureReady || !infraConfig.GetDeletionTimestamp().IsZero() { + return nil + } + + // Determine if the infrastructure provider is ready. 
+ ready, err := external.IsReady(infraConfig) + if err != nil { + return err + } else if !ready { + klog.V(3).Infof("Infrastructure provider for Machine %q in namespace %q is not ready, requeuing", m.Name, m.Namespace) + return &capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second} + } + + // Get Spec.ProviderID from the infrastructure provider. + var providerID string + if err := util.UnstructuredUnmarshalField(infraConfig, &providerID, "spec", "providerID"); err != nil { + return errors.Wrapf(err, "failed to retrieve data from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace) + } else if providerID == "" { + return errors.Errorf("retrieved empty Spec.ProviderID from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace) + } + + // Get and set Status.Addresses from the infrastructure provider. + err = util.UnstructuredUnmarshalField(infraConfig, &m.Status.Addresses, "status", "addresses") + + if err != nil { + if err != util.ErrUnstructuredFieldNotFound { + return errors.Wrapf(err, "failed to retrieve addresses from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace) + } + } + + m.Spec.ProviderID = pointer.StringPtr(providerID) + m.Status.InfrastructureReady = true + return nil +} + +// reconcileClusterAnnotations reconciles the annotations on the Cluster associated with Machines. +// TODO(vincepri): Move to cluster controller once the proposal merges. +func (r *ReconcileMachine) reconcileClusterAnnotations(ctx context.Context, cluster *v1alpha2.Cluster, m *v1alpha2.Machine) error { + if !m.DeletionTimestamp.IsZero() || cluster == nil { + return nil + } + + // If the Machine is a control plane, it has a NodeRef and it's ready, set an annotation on the Cluster. + if util.IsControlPlaneMachine(m) && m.Status.NodeRef != nil && m.Status.InfrastructureReady { + if cluster.Annotations == nil { + cluster.SetAnnotations(map[string]string{}) + } + + if _, ok := cluster.Annotations[v1alpha2.ClusterAnnotationControlPlaneReady]; !ok { + clusterPatch := client.MergeFrom(cluster) + cluster.Annotations[v1alpha2.ClusterAnnotationControlPlaneReady] = "true" + if err := r.Client.Patch(ctx, cluster, clusterPatch); err != nil { + return errors.Wrapf(err, "failed to set control-plane-ready annotation on Cluster %q in namespace %q", + cluster.Name, cluster.Namespace) + } + } + } + + return nil +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases_test.go new file mode 100644 index 0000000000..7c3f5a41db --- /dev/null +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_phases_test.go @@ -0,0 +1,700 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package machine + +import ( + "context" + "testing" + + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/api/v1alpha2" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestReconcilePhase(t *testing.T) { + deletionTimestamp := metav1.Now() + + defaultMachine := v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: "default", + }, + Spec: v1alpha2.MachineSpec{ + Bootstrap: v1alpha2.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", + }, + }, + } + + testCases := []struct { + name string + bootstrapConfig map[string]interface{} + infraConfig map[string]interface{} + machine *v1alpha2.Machine + expectError bool + expectRequeueAfter bool + expected func(g *gomega.WithT, m *v1alpha2.Machine) + }{ + { + name: "new machine, expect pending", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + expectError: false, + expectRequeueAfter: true, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(v1alpha2.MachinePhasePending)) + }, + }, + { + name: "ready bootstrap, expect provisioning", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "bootstrapData": "...", + }, + }, + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + expectError: false, + expectRequeueAfter: true, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(v1alpha2.MachinePhaseProvisioning)) + }, + }, + { + name: "ready bootstrap and infra, expect provisioned", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "bootstrapData": "...", + }, + }, + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + 
"apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "providerID": "test://id-1", + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + }, + }, + expectError: false, + expectRequeueAfter: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(v1alpha2.MachinePhaseProvisioned)) + g.Expect(m.Status.Addresses).To(gomega.HaveLen(2)) + }, + }, + { + name: "ready bootstrap and infra, allow nil addresses as they are optional", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "bootstrapData": "...", + }, + }, + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "providerID": "test://id-1", + }, + "status": map[string]interface{}{ + "ready": true, + }, + }, + expectError: false, + expectRequeueAfter: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(v1alpha2.MachinePhaseProvisioned)) + g.Expect(m.Status.Addresses).To(gomega.HaveLen(0)) + }, + }, + { + name: "ready bootstrap, infra, and nodeRef, expect running", + machine: &v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: "default", + }, + Spec: v1alpha2.MachineSpec{ + Bootstrap: v1alpha2.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", + }, + }, + Status: v1alpha2.MachineStatus{ + BootstrapReady: true, + InfrastructureReady: true, + NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "machine-test-node"}, + }, + }, + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "bootstrapData": "...", + }, + }, + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "providerID": "test://id-1", + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + }, + }, + }, + expectError: false, + expectRequeueAfter: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + 
g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(v1alpha2.MachinePhaseRunning)) + }, + }, + { + name: "ready bootstrap, infra, and nodeRef, machine being deleted, expect deleting", + machine: &v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: "default", + DeletionTimestamp: &deletionTimestamp, + }, + Spec: v1alpha2.MachineSpec{ + Bootstrap: v1alpha2.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", + }, + }, + Status: v1alpha2.MachineStatus{ + BootstrapReady: true, + InfrastructureReady: true, + NodeRef: &corev1.ObjectReference{Kind: "Node", Name: "machine-test-node"}, + }, + }, + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "bootstrapData": "...", + }, + }, + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "providerID": "test://id-1", + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + }, + }, + }, + expectError: false, + expectRequeueAfter: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.GetTypedPhase()).To(gomega.Equal(v1alpha2.MachinePhaseDeleting)) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + if tc.machine == nil { + tc.machine = defaultMachine.DeepCopy() + } + + bootstrapConfig := &unstructured.Unstructured{Object: tc.bootstrapConfig} + infraConfig := &unstructured.Unstructured{Object: tc.infraConfig} + r := &ReconcileMachine{ + Client: fake.NewFakeClient(tc.machine, bootstrapConfig, infraConfig), + scheme: scheme.Scheme, + } + + err := r.reconcile(context.Background(), nil, tc.machine) + if tc.expectError { + g.Expect(err).ToNot(gomega.BeNil()) + } else if tc.expectRequeueAfter { + g.Expect(capierrors.IsRequeueAfter(err)).To(gomega.BeTrue()) + } else if !tc.expectError { + g.Expect(err).To(gomega.BeNil()) + } + + if tc.expected != nil { + tc.expected(g, tc.machine) + } + + // Test that externalWatchers detected the new kinds. 
+ if tc.machine.DeletionTimestamp.IsZero() { + _, ok := r.externalWatchers.Load(bootstrapConfig.GroupVersionKind().String()) + g.Expect(ok).To(gomega.BeTrue()) + _, ok = r.externalWatchers.Load(infraConfig.GroupVersionKind().String()) + g.Expect(ok).To(gomega.BeTrue()) + } + }) + + } + +} + +func TestReconcileBootstrap(t *testing.T) { + defaultMachine := v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: "default", + Labels: map[string]string{ + v1alpha2.MachineClusterLabelName: "test-cluster", + }, + }, + Spec: v1alpha2.MachineSpec{ + Bootstrap: v1alpha2.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + }, + }, + } + + testCases := []struct { + name string + bootstrapConfig map[string]interface{} + machine *v1alpha2.Machine + expectError bool + expected func(g *gomega.WithT, m *v1alpha2.Machine) + }{ + { + name: "new machine, bootstrap config ready with data", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "bootstrapData": "#!/bin/bash ... data", + }, + }, + expectError: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.BootstrapReady).To(gomega.BeTrue()) + g.Expect(m.Spec.Bootstrap.Data).ToNot(gomega.BeNil()) + g.Expect(*m.Spec.Bootstrap.Data).To(gomega.ContainSubstring("#!/bin/bash")) + }, + }, + { + name: "new machine, bootstrap config ready with no data", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + }, + }, + expectError: true, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.BootstrapReady).To(gomega.BeFalse()) + g.Expect(m.Spec.Bootstrap.Data).To(gomega.BeNil()) + }, + }, + { + name: "new machine, bootstrap config not ready", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + expectError: true, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.BootstrapReady).To(gomega.BeFalse()) + }, + }, + { + name: "new machine, bootstrap config is not found", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "wrong-namespace", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + expectError: true, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.BootstrapReady).To(gomega.BeFalse()) + }, + }, + { + name: "new machine, no bootstrap config or data", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": 
"wrong-namespace", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{}, + }, + expectError: true, + }, + { + name: "existing machine, bootstrap data should not change", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "data": "#!/bin/bash ... data with change", + }, + }, + machine: &v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bootstrap-test-existing", + Namespace: "default", + }, + Spec: v1alpha2.MachineSpec{ + Bootstrap: v1alpha2.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + Data: pointer.StringPtr("#!/bin/bash ... data"), + }, + }, + Status: v1alpha2.MachineStatus{ + BootstrapReady: true, + }, + }, + expectError: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.BootstrapReady).To(gomega.BeTrue()) + g.Expect(*m.Spec.Bootstrap.Data).To(gomega.Equal("#!/bin/bash ... data")) + }, + }, + { + name: "existing machine, bootstrap provider is to not ready", + bootstrapConfig: map[string]interface{}{ + "kind": "BootstrapConfig", + "apiVersion": "bootstrap.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "bootstrap-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": false, + "data": "#!/bin/bash ... data", + }, + }, + machine: &v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bootstrap-test-existing", + Namespace: "default", + }, + Spec: v1alpha2.MachineSpec{ + Bootstrap: v1alpha2.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "BootstrapConfig", + Name: "bootstrap-config1", + }, + Data: pointer.StringPtr("#!/bin/bash ... 
data"), + }, + }, + Status: v1alpha2.MachineStatus{ + BootstrapReady: true, + }, + }, + expectError: false, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.BootstrapReady).To(gomega.BeTrue()) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + if tc.machine == nil { + tc.machine = defaultMachine.DeepCopy() + } + + bootstrapConfig := &unstructured.Unstructured{Object: tc.bootstrapConfig} + r := &ReconcileMachine{ + Client: fake.NewFakeClient(tc.machine, bootstrapConfig), + scheme: scheme.Scheme, + } + + err := r.reconcileBootstrap(context.Background(), tc.machine) + if tc.expectError { + g.Expect(err).ToNot(gomega.BeNil()) + } else { + g.Expect(err).To(gomega.BeNil()) + } + + if tc.expected != nil { + tc.expected(g, tc.machine) + } + }) + + } + +} + +func TestReconcileInfrastructure(t *testing.T) { + defaultMachine := v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: "default", + Labels: map[string]string{ + v1alpha2.MachineClusterLabelName: "test-cluster", + }, + }, + Spec: v1alpha2.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", + }, + }, + } + + testCases := []struct { + name string + infraConfig map[string]interface{} + machine *v1alpha2.Machine + expectError bool + expectChanged bool + expected func(g *gomega.WithT, m *v1alpha2.Machine) + }{ + { + name: "new machine, infrastructure config ready", + infraConfig: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "providerID": "test://id-1", + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.2", + }, + }, + }, + }, + expectError: false, + expectChanged: true, + expected: func(g *gomega.WithT, m *v1alpha2.Machine) { + g.Expect(m.Status.InfrastructureReady).To(gomega.BeTrue()) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + if tc.machine == nil { + tc.machine = defaultMachine.DeepCopy() + } + + infraConfig := &unstructured.Unstructured{Object: tc.infraConfig} + r := &ReconcileMachine{ + Client: fake.NewFakeClient(tc.machine, infraConfig), + scheme: scheme.Scheme, + } + + err := r.reconcileInfrastructure(context.Background(), tc.machine) + if tc.expectError { + g.Expect(err).ToNot(gomega.BeNil()) + } else { + g.Expect(err).To(gomega.BeNil()) + } + + if tc.expected != nil { + tc.expected(g, tc.machine) + } + }) + + } + +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go index cf00795b4c..d710755e8a 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_controller_test.go @@ -17,15 +17,17 @@ limitations under the License. 
package machine import ( - "reflect" - "sort" "testing" + "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "k8s.io/utils/pointer" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -35,52 +37,91 @@ var ( ) func TestReconcileRequest(t *testing.T) { - machine1 := v1alpha1.Machine{ + g := gomega.NewGomegaWithT(t) + + infraConfig := unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "InfrastructureConfig", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{ + "name": "infra-config1", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "providerID": "test://id-1", + }, + "status": map[string]interface{}{ + "ready": true, + "addresses": []interface{}{ + map[string]interface{}{ + "type": "InternalIP", + "address": "10.0.0.1", + }, + }, + }, + }, + } + machine1 := v1alpha2.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "create", Namespace: "default", - Finalizers: []string{v1alpha1.MachineFinalizer, metav1.FinalizerDeleteDependents}, - Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "testcluster", + Finalizers: []string{v1alpha2.MachineFinalizer, metav1.FinalizerDeleteDependents}, + }, + Spec: v1alpha2.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", }, + Bootstrap: v1alpha2.Bootstrap{Data: pointer.StringPtr("data")}, }, } - machine2 := v1alpha1.Machine{ + machine2 := v1alpha2.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "update", Namespace: "default", - Finalizers: []string{v1alpha1.MachineFinalizer, metav1.FinalizerDeleteDependents}, - Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "testcluster", + Finalizers: []string{v1alpha2.MachineFinalizer, metav1.FinalizerDeleteDependents}, + }, + Spec: v1alpha2.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", }, + Bootstrap: v1alpha2.Bootstrap{Data: pointer.StringPtr("data")}, }, } time := metav1.Now() - machine3 := v1alpha1.Machine{ + machine3 := v1alpha2.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", }, ObjectMeta: metav1.ObjectMeta{ Name: "delete", Namespace: "default", - Finalizers: []string{v1alpha1.MachineFinalizer, metav1.FinalizerDeleteDependents}, + Finalizers: []string{v1alpha2.MachineFinalizer, metav1.FinalizerDeleteDependents}, DeletionTimestamp: &time, - Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "testcluster", + }, + Spec: v1alpha2.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureConfig", + Name: "infra-config1", }, + Bootstrap: v1alpha2.Bootstrap{Data: pointer.StringPtr("data")}, }, } - clusterList := v1alpha1.ClusterList{ + clusterList := v1alpha2.ClusterList{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterList", }, - Items: []v1alpha1.Cluster{ + Items: []v1alpha2.Cluster{ { TypeMeta: metav1.TypeMeta{ 
Kind: "Cluster", @@ -103,12 +144,8 @@ func TestReconcileRequest(t *testing.T) { } type expected struct { - createCallCount int64 - existCallCount int64 - updateCallCount int64 - deleteCallCount int64 - result reconcile.Result - error bool + result reconcile.Result + err bool } testCases := []struct { request reconcile.Request @@ -116,169 +153,45 @@ func TestReconcileRequest(t *testing.T) { expected expected }{ { - request: reconcile.Request{NamespacedName: types.NamespacedName{Name: machine1.Name, Namespace: machine1.Namespace}}, + request: reconcile.Request{NamespacedName: types.NamespacedName{Name: machine1.Name, Namespace: machine1.Namespace}}, + existsValue: false, expected: expected{ - createCallCount: 1, - existCallCount: 1, - updateCallCount: 0, - deleteCallCount: 0, - result: reconcile.Result{}, - error: false, + result: reconcile.Result{}, + err: false, }, }, { request: reconcile.Request{NamespacedName: types.NamespacedName{Name: machine2.Name, Namespace: machine2.Namespace}}, existsValue: true, expected: expected{ - createCallCount: 0, - existCallCount: 1, - updateCallCount: 1, - deleteCallCount: 0, - result: reconcile.Result{}, - error: false, + result: reconcile.Result{}, + err: false, }, }, { request: reconcile.Request{NamespacedName: types.NamespacedName{Name: machine3.Name, Namespace: machine3.Namespace}}, existsValue: true, expected: expected{ - createCallCount: 0, - existCallCount: 0, - updateCallCount: 0, - deleteCallCount: 1, - result: reconcile.Result{}, - error: false, + result: reconcile.Result{}, + err: false, }, }, } for _, tc := range testCases { - act := newTestActuator() - act.ExistsValue = tc.existsValue - v1alpha1.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) r := &ReconcileMachine{ - Client: fake.NewFakeClient(&clusterList, &machine1, &machine2, &machine3), - scheme: scheme.Scheme, - actuator: act, + Client: fake.NewFakeClient(&clusterList, &machine1, &machine2, &machine3, &infraConfig), + scheme: scheme.Scheme, } result, err := r.Reconcile(tc.request) - gotError := (err != nil) - if tc.expected.error != gotError { - var errorExpectation string - if !tc.expected.error { - errorExpectation = "no" - } - t.Errorf("Case: %s. Expected %s error, got: %v", tc.request.Name, errorExpectation, err) - } - - if !reflect.DeepEqual(result, tc.expected.result) { - t.Errorf("Case %s. Got: %v, expected %v", tc.request.Name, result, tc.expected.result) - } - - if act.CreateCallCount != tc.expected.createCallCount { - t.Errorf("Case %s. Got: %d createCallCount, expected %d", tc.request.Name, act.CreateCallCount, tc.expected.createCallCount) - } - - if act.UpdateCallCount != tc.expected.updateCallCount { - t.Errorf("Case %s. Got: %d updateCallCount, expected %d", tc.request.Name, act.UpdateCallCount, tc.expected.updateCallCount) - } - - if act.ExistsCallCount != tc.expected.existCallCount { - t.Errorf("Case %s. Got: %d existCallCount, expected %d", tc.request.Name, act.ExistsCallCount, tc.expected.existCallCount) - } - - if act.DeleteCallCount != tc.expected.deleteCallCount { - t.Errorf("Case %s. Got: %d deleteCallCount, expected %d", tc.request.Name, act.DeleteCallCount, tc.expected.deleteCallCount) - } - } -} - -func TestReconcileFinalizers(t *testing.T) { - // If we need to add new finalizer logic later, add them here as well. 
- allFinalizers := []string{metav1.FinalizerDeleteDependents, clusterv1.MachineFinalizer} - nilClusterFinalizers := []string{clusterv1.MachineFinalizer} - - cluster1 := v1alpha1.Cluster{} - - testCases := []struct { - cluster *v1alpha1.Cluster - startingFinalizers []string - deleted bool - expected bool - expectedFinalizers []string - }{ - { - cluster: &cluster1, - startingFinalizers: []string{}, - deleted: false, - expected: true, - expectedFinalizers: allFinalizers, - }, - { - cluster: &cluster1, - startingFinalizers: []string{metav1.FinalizerDeleteDependents}, - deleted: false, - expected: true, - expectedFinalizers: allFinalizers, - }, - { - cluster: &cluster1, - startingFinalizers: []string{clusterv1.MachineFinalizer}, - deleted: false, - expected: true, - expectedFinalizers: allFinalizers, - }, - { - cluster: nil, - startingFinalizers: []string{}, - deleted: false, - expected: true, - expectedFinalizers: nilClusterFinalizers, - }, - { - cluster: nil, - startingFinalizers: []string{clusterv1.MachineFinalizer}, - deleted: false, - expected: false, - expectedFinalizers: nilClusterFinalizers, - }, - { - cluster: &cluster1, - startingFinalizers: []string{}, - deleted: true, - expected: false, - expectedFinalizers: []string{}, - }, - { - cluster: &cluster1, - startingFinalizers: allFinalizers, - deleted: true, - expected: false, - expectedFinalizers: allFinalizers, - }, - } - - for i, tc := range testCases { - m := v1alpha1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Finalizers: tc.startingFinalizers, - }, + if tc.expected.err { + g.Expect(err).ToNot(gomega.BeNil()) + } else { + g.Expect(err).To(gomega.BeNil()) } - if tc.deleted { - time := metav1.Now() - m.ObjectMeta.DeletionTimestamp = &time - } - needUpdate := reconcileFinalizers(&m, tc.cluster) - if needUpdate != tc.expected { - t.Errorf("Case: %d. Expected %v, got: %v", i, tc.expected, needUpdate) - } - // sort for easier comparison: [a, b] != [b, a] for DeepEqual. - sort.Strings(tc.expectedFinalizers) - sort.Strings(m.Finalizers) - if !reflect.DeepEqual(tc.expectedFinalizers, m.Finalizers) { - t.Errorf("Case: %d. 
Expected %v finializers, got: %v", i, tc.expectedFinalizers, m.Finalizers) - } + g.Expect(result).To(gomega.Equal(tc.expected.result)) } } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_suite_test.go index cd7fdfa8a4..2e769df1d3 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_suite_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_suite_test.go @@ -24,19 +24,18 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var cfg *rest.Config func TestMain(m *testing.M) { t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, } - apis.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) var err error if cfg, err = t.Start(); err != nil { @@ -48,19 +47,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. -func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { - requests := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - return result, err - }) - return fn, requests -} - -// StartTestManager adds recFn func StartTestManager(mgr manager.Manager, t *testing.T) chan struct{} { t.Helper() diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_test.go index 8d8b2f88e0..032f677136 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/machine_reconciler_test.go @@ -20,57 +20,55 @@ import ( "testing" "time" + . "github.com/onsi/gomega" "golang.org/x/net/context" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var c client.Client - -var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} - const timeout = time.Second * 5 func TestReconcile(t *testing.T) { - instance := &clusterv1alpha1.Machine{ + RegisterTestingT(t) + ctx := context.TODO() + instance := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: clusterv1alpha1.MachineSpec{ - Versions: clusterv1alpha1.MachineVersionInfo{Kubelet: "1.10.3"}, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureRef", + Name: "machine-infrastructure", + }, }, } // Setup the Manager and Controller. 
Wrap the Controller Reconcile function so it writes each request to a // channel when it is finished. - mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) if err != nil { t.Fatalf("error creating new manager: %v", err) } - c = mgr.GetClient() + c := mgr.GetClient() - a := newTestActuator() - recFn, requests := SetupTestReconcile(newReconciler(mgr, a)) - if err := add(mgr, recFn); err != nil { - t.Fatalf("error adding controller to manager: %v", err) - } + reconciler := newReconciler(mgr) + controller, err := addController(mgr, reconciler) + Expect(err).To(BeNil()) + reconciler.controller = controller defer close(StartTestManager(mgr, t)) // Create the Machine object and expect Reconcile and the actuator to be called - if err := c.Create(context.TODO(), instance); err != nil { - t.Fatalf("error creating instance: %v", err) - } - defer c.Delete(context.TODO(), instance) - select { - case recv := <-requests: - if recv != expectedRequest { - t.Error("received request does not match expected request") - } - case <-time.After(timeout): - t.Error("timed out waiting for request") - } + Expect(c.Create(ctx, instance)).To(BeNil()) + defer c.Delete(ctx, instance) - // TODO: Verify that the actuator is called correctly on Create + // Make sure the Machine exists. + Eventually(func() bool { + key := client.ObjectKey{Namespace: instance.Namespace, Name: instance.Name} + if err := c.Get(ctx, key, instance); err != nil { + return false + } + return true + }, timeout).Should(BeTrue()) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/testactuator.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/testactuator.go deleted file mode 100644 index 5e5b165b61..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/testactuator.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package machine - -import ( - "context" - "sync" - - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -var _ Actuator = &TestActuator{} - -type TestActuator struct { - unblock chan string - BlockOnCreate bool - BlockOnDelete bool - BlockOnUpdate bool - BlockOnExists bool - CreateCallCount int64 - DeleteCallCount int64 - UpdateCallCount int64 - ExistsCallCount int64 - ExistsValue bool - Lock sync.Mutex -} - -func (a *TestActuator) Create(context.Context, *v1alpha1.Cluster, *v1alpha1.Machine) error { - defer func() { - if a.BlockOnCreate { - <-a.unblock - } - }() - - a.Lock.Lock() - defer a.Lock.Unlock() - a.CreateCallCount++ - return nil -} - -func (a *TestActuator) Delete(context.Context, *v1alpha1.Cluster, *v1alpha1.Machine) error { - defer func() { - if a.BlockOnDelete { - <-a.unblock - } - }() - - a.Lock.Lock() - defer a.Lock.Unlock() - a.DeleteCallCount++ - return nil -} - -func (a *TestActuator) Update(ctx context.Context, c *v1alpha1.Cluster, machine *v1alpha1.Machine) error { - defer func() { - if a.BlockOnUpdate { - <-a.unblock - } - }() - a.Lock.Lock() - defer a.Lock.Unlock() - a.UpdateCallCount++ - return nil -} - -func (a *TestActuator) Exists(context.Context, *v1alpha1.Cluster, *v1alpha1.Machine) (bool, error) { - defer func() { - if a.BlockOnExists { - <-a.unblock - } - }() - - a.Lock.Lock() - defer a.Lock.Unlock() - a.ExistsCallCount++ - return a.ExistsValue, nil -} - -func newTestActuator() *TestActuator { - ta := new(TestActuator) - ta.unblock = make(chan string) - return ta -} - -func (a *TestActuator) Unblock() { - close(a.unblock) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/BUILD.bazel index 8e89fc0254..d0217ef927 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/BUILD.bazel @@ -10,8 +10,7 @@ go_library( importpath = "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//pkg/controller/machinedeployment/util:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", @@ -25,9 +24,9 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/integer:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/controller:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", @@ -46,17 +45,21 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/controller/external:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + 
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client/fake:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go index a142cdfceb..0c80f1e9d6 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller.go @@ -29,8 +29,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -45,7 +45,7 @@ const controllerName = "machinedeployment-controller" var ( // controllerKind contains the schema.GroupVersionKind for this controller type. - controllerKind = v1alpha1.SchemeGroupVersion.WithKind("MachineDeployment") + controllerKind = v1alpha2.GroupVersion.WithKind("MachineDeployment") ) // ReconcileMachineDeployment reconciles a MachineDeployment object. @@ -57,7 +57,11 @@ type ReconcileMachineDeployment struct { // newReconciler returns a new reconcile.Reconciler. func newReconciler(mgr manager.Manager) *ReconcileMachineDeployment { - return &ReconcileMachineDeployment{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetRecorder(controllerName)} + return &ReconcileMachineDeployment{ + Client: mgr.GetClient(), + scheme: mgr.GetScheme(), + recorder: mgr.GetEventRecorderFor(controllerName), + } } // Add creates a new MachineDeployment Controller and adds it to the Manager with default RBAC. @@ -76,7 +80,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // Watch for changes to MachineDeployment. err = c.Watch(&source.Kind{ - Type: &v1alpha1.MachineDeployment{}}, + Type: &v1alpha2.MachineDeployment{}}, &handler.EnqueueRequestForObject{}, ) if err != nil { @@ -85,8 +89,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // Watch for changes to MachineSet and reconcile the owner MachineDeployment. 
err = c.Watch( - &source.Kind{Type: &v1alpha1.MachineSet{}}, - &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.MachineDeployment{}, IsController: true}, + &source.Kind{Type: &v1alpha2.MachineSet{}}, + &handler.EnqueueRequestForOwner{OwnerType: &v1alpha2.MachineDeployment{}, IsController: true}, ) if err != nil { return err @@ -96,7 +100,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // This watcher is required for use cases like adoption. In case a MachineSet doesn't have // a controller reference, it'll look for potential matching MachineDeployments to reconcile. err = c.Watch( - &source.Kind{Type: &v1alpha1.MachineSet{}}, + &source.Kind{Type: &v1alpha2.MachineSet{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: mapFn}, ) if err != nil { @@ -108,13 +112,11 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // Reconcile reads that state of the cluster for a MachineDeployment object and makes changes based on the state read // and what is in the MachineDeployment.Spec. -// -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machinedeployments;machinedeployments/status,verbs=get;list;watch;create;update;patch;delete func (r *ReconcileMachineDeployment) Reconcile(request reconcile.Request) (reconcile.Result, error) { // Fetch the MachineDeployment instance - d := &v1alpha1.MachineDeployment{} + d := &v1alpha2.MachineDeployment{} ctx := context.TODO() - if err := r.Get(context.TODO(), request.NamespacedName, d); err != nil { + if err := r.Get(ctx, request.NamespacedName, d); err != nil { if apierrors.IsNotFound(err) { // Object not found, return. Created objects are automatically garbage collected. // For additional cleanup logic use finalizers. @@ -139,15 +141,16 @@ func (r *ReconcileMachineDeployment) Reconcile(request reconcile.Request) (recon return result, err } -func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha1.MachineDeployment) (reconcile.Result, error) { - v1alpha1.PopulateDefaultsMachineDeployment(d) +func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha2.MachineDeployment) (reconcile.Result, error) { + v1alpha2.PopulateDefaultsMachineDeployment(d) everything := metav1.LabelSelector{} if reflect.DeepEqual(d.Spec.Selector, &everything) { if d.Status.ObservedGeneration < d.Generation { + patch := client.MergeFrom(d.DeepCopy()) d.Status.ObservedGeneration = d.Generation - if err := r.Status().Update(context.Background(), d); err != nil { - klog.Warningf("Failed to update status for MachineDeployment %q: %v", d.Name, err) + if err := r.Status().Patch(ctx, d, patch); err != nil { + klog.Warningf("Failed to patch status for MachineDeployment %q: %v", d.Name, err) return reconcile.Result{}, err } } @@ -167,40 +170,22 @@ func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha1. // Cluster might be nil as some providers might not require a cluster object // for machine management. - cluster, err := r.getCluster(d) - if err != nil { + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, d.ObjectMeta) + if errors.Cause(err) == util.ErrNoCluster { + klog.Infof("MachineDeployment %q in namespace %q doesn't specify %q label, assuming nil Cluster", d.Name, d.Namespace, v1alpha2.MachineClusterLabelName) + } else if err != nil { return reconcile.Result{}, err } - // Set the ownerRef with foreground deletion if there is a linked cluster. 
if cluster != nil && shouldAdopt(d) { - klog.Infof("Cluster %s/%s is adopting MachineDeployment %s", cluster.Namespace, cluster.Name, d.Name) - blockOwnerDeletion := true - d.OwnerReferences = append(d.OwnerReferences, metav1.OwnerReference{ - APIVersion: cluster.APIVersion, - Kind: cluster.Kind, - Name: cluster.Name, - UID: cluster.UID, - BlockOwnerDeletion: &blockOwnerDeletion, + d.OwnerReferences = util.EnsureOwnerRef(d.OwnerReferences, metav1.OwnerReference{ + APIVersion: cluster.APIVersion, + Kind: cluster.Kind, + Name: cluster.Name, + UID: cluster.UID, }) } - // Add foregroundDeletion finalizer if MachineDeployment isn't deleted and linked to a cluster. - if cluster != nil && - d.ObjectMeta.DeletionTimestamp.IsZero() && - !util.Contains(d.Finalizers, metav1.FinalizerDeleteDependents) { - - d.Finalizers = append(d.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) - - if err := r.Client.Update(context.Background(), d); err != nil { - klog.Infof("Failed to add finalizers to MachineSet %q: %v", d.Name, err) - return reconcile.Result{}, err - } - - // Since adding the finalizer updates the object return to avoid later update issues - return reconcile.Result{Requeue: true}, nil - } - msList, err := r.getMachineSetsForDeployment(d) if err != nil { return reconcile.Result{}, err @@ -216,7 +201,7 @@ func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha1. } switch d.Spec.Strategy.Type { - case common.RollingUpdateMachineDeploymentStrategyType: + case v1alpha2.RollingUpdateMachineDeploymentStrategyType: return reconcile.Result{}, r.rolloutRolling(d, msList, machineMap) } @@ -224,16 +209,16 @@ func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha1. } // getCluster reuturns the Cluster associated with the MachineDeployment, if any. -func (r *ReconcileMachineDeployment) getCluster(d *v1alpha1.MachineDeployment) (*v1alpha1.Cluster, error) { - if d.Spec.Template.Labels[v1alpha1.MachineClusterLabelName] == "" { - klog.Infof("Deployment %q in namespace %q doesn't specify %q label, assuming nil cluster", d.Name, d.Namespace, v1alpha1.MachineClusterLabelName) +func (r *ReconcileMachineDeployment) getCluster(d *v1alpha2.MachineDeployment) (*v1alpha2.Cluster, error) { + if d.Spec.Template.Labels[v1alpha2.MachineClusterLabelName] == "" { + klog.Infof("Deployment %q in namespace %q doesn't specify %q label, assuming nil cluster", d.Name, d.Namespace, v1alpha2.MachineClusterLabelName) return nil, nil } - cluster := &v1alpha1.Cluster{} + cluster := &v1alpha2.Cluster{} key := client.ObjectKey{ Namespace: d.Namespace, - Name: d.Spec.Template.Labels[v1alpha1.MachineClusterLabelName], + Name: d.Spec.Template.Labels[v1alpha2.MachineClusterLabelName], } if err := r.Client.Get(context.Background(), key, cluster); err != nil { @@ -244,16 +229,14 @@ func (r *ReconcileMachineDeployment) getCluster(d *v1alpha1.MachineDeployment) ( } // getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. -func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(d *v1alpha1.MachineDeployment) ([]*v1alpha1.MachineSet, error) { - +func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(d *v1alpha2.MachineDeployment) ([]*v1alpha2.MachineSet, error) { // List all MachineSets to find those we own but that no longer match our selector. 
- machineSets := &v1alpha1.MachineSetList{} - listOptions := &client.ListOptions{Namespace: d.Namespace} - if err := r.Client.List(context.Background(), listOptions, machineSets); err != nil { + machineSets := &v1alpha2.MachineSetList{} + if err := r.Client.List(context.Background(), machineSets, client.InNamespace(d.Namespace)); err != nil { return nil, err } - filtered := make([]*v1alpha1.MachineSet, 0, len(machineSets.Items)) + filtered := make([]*v1alpha2.MachineSet, 0, len(machineSets.Items)) for idx := range machineSets.Items { ms := &machineSets.Items[idx] @@ -295,17 +278,18 @@ func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(d *v1alpha1.Mac } // adoptOrphan sets the MachineDeployment as a controller OwnerReference to the MachineSet. -func (r *ReconcileMachineDeployment) adoptOrphan(deployment *v1alpha1.MachineDeployment, machineSet *v1alpha1.MachineSet) error { +func (r *ReconcileMachineDeployment) adoptOrphan(deployment *v1alpha2.MachineDeployment, machineSet *v1alpha2.MachineSet) error { + patch := client.MergeFrom(machineSet.DeepCopy()) newRef := *metav1.NewControllerRef(deployment, controllerKind) machineSet.OwnerReferences = append(machineSet.OwnerReferences, newRef) - return r.Client.Update(context.Background(), machineSet) + return r.Client.Patch(context.Background(), machineSet, patch) } // getMachineMapForDeployment returns the Machines managed by a Deployment. // // It returns a map from MachineSet UID to a list of Machines controlled by that MachineSet, // according to the Machine's ControllerRef. -func (r *ReconcileMachineDeployment) getMachineMapForDeployment(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) (map[types.UID]*v1alpha1.MachineList, error) { +func (r *ReconcileMachineDeployment) getMachineMapForDeployment(d *v1alpha2.MachineDeployment, msList []*v1alpha2.MachineSet) (map[types.UID]*v1alpha2.MachineList, error) { // TODO(droot): double check if previous selector maps correctly to new one. // _, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) @@ -315,16 +299,15 @@ func (r *ReconcileMachineDeployment) getMachineMapForDeployment(d *v1alpha1.Mach return nil, err } - machines := &v1alpha1.MachineList{} - listOptions := &client.ListOptions{Namespace: d.Namespace} - if err = r.Client.List(context.Background(), listOptions.MatchingLabels(selector), machines); err != nil { + machines := &v1alpha2.MachineList{} + if err = r.Client.List(context.Background(), machines, client.InNamespace(d.Namespace), client.MatchingLabels(selector)); err != nil { return nil, err } // Group Machines by their controller (if it's in msList). - machineMap := make(map[types.UID]*v1alpha1.MachineList, len(msList)) + machineMap := make(map[types.UID]*v1alpha2.MachineList, len(msList)) for _, ms := range msList { - machineMap[ms.UID] = &v1alpha1.MachineList{} + machineMap[ms.UID] = &v1alpha2.MachineList{} } for idx := range machines.Items { @@ -347,20 +330,19 @@ func (r *ReconcileMachineDeployment) getMachineMapForDeployment(d *v1alpha1.Mach } // getMachineDeploymentsForMachineSet returns a list of MachineDeployments that could potentially match a MachineSet. 
-func (r *ReconcileMachineDeployment) getMachineDeploymentsForMachineSet(ms *v1alpha1.MachineSet) []*v1alpha1.MachineDeployment { +func (r *ReconcileMachineDeployment) getMachineDeploymentsForMachineSet(ms *v1alpha2.MachineSet) []*v1alpha2.MachineDeployment { if len(ms.Labels) == 0 { - klog.Warningf("No machine deployments found for MachineSet %q because it has no labels", ms.Name) + klog.Warningf("No MachineDeployments found for MachineSet %q because it has no labels", ms.Name) return nil } - dList := &v1alpha1.MachineDeploymentList{} - listOptions := &client.ListOptions{Namespace: ms.Namespace} - if err := r.Client.List(context.Background(), listOptions, dList); err != nil { - klog.Warningf("Failed to list machine deployments: %v", err) + dList := &v1alpha2.MachineDeploymentList{} + if err := r.Client.List(context.Background(), dList, client.InNamespace(ms.Namespace)); err != nil { + klog.Warningf("Failed to list MachineDeployments: %v", err) return nil } - deployments := make([]*v1alpha1.MachineDeployment, 0, len(dList.Items)) + deployments := make([]*v1alpha2.MachineDeployment, 0, len(dList.Items)) for idx, d := range dList.Items { selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) if err != nil { @@ -383,12 +365,9 @@ func (r *ReconcileMachineDeployment) getMachineDeploymentsForMachineSet(ms *v1al func (r *ReconcileMachineDeployment) MachineSetToDeployments(o handler.MapObject) []reconcile.Request { result := []reconcile.Request{} - ms := &v1alpha1.MachineSet{} - key := client.ObjectKey{Namespace: o.Meta.GetNamespace(), Name: o.Meta.GetName()} - if err := r.Client.Get(context.Background(), key, ms); err != nil { - if !apierrors.IsNotFound(err) { - klog.Errorf("Unable to retrieve MachineSet %q for possible MachineDeployment adoption: %v", key, err) - } + ms, ok := o.Object.(*clusterv1.MachineSet) + if !ok { + klog.Errorf("Expected a MachineSet but got %T instead: %v", o.Object, o.Object) return nil } @@ -402,7 +381,7 @@ func (r *ReconcileMachineDeployment) MachineSetToDeployments(o handler.MapObject mds := r.getMachineDeploymentsForMachineSet(ms) if len(mds) == 0 { - klog.V(4).Infof("Found no machine set for machine: %v", ms.Name) + klog.V(4).Infof("Found no MachineDeployment for MachineSet: %v", ms.Name) return nil } @@ -414,6 +393,6 @@ func (r *ReconcileMachineDeployment) MachineSetToDeployments(o handler.MapObject return result } -func shouldAdopt(md *v1alpha1.MachineDeployment) bool { - return !util.HasOwner(md.OwnerReferences, v1alpha1.SchemeGroupVersion.String(), []string{"Cluster"}) +func shouldAdopt(md *v1alpha2.MachineDeployment) bool { + return !util.HasOwner(md.OwnerReferences, v1alpha2.GroupVersion.String(), []string{"Cluster"}) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller_test.go index 03063bdc47..6a8959c70f 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_controller_test.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/handler" @@ 
-35,29 +35,29 @@ var ( ) func TestMachineSetToDeployments(t *testing.T) { - machineDeployment := v1alpha1.MachineDeployment{ + machineDeployment := v1alpha2.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withMatchingLabels", Namespace: "test", }, - Spec: v1alpha1.MachineDeploymentSpec{ + Spec: v1alpha2.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, }, } - machineDeplopymentList := &v1alpha1.MachineDeploymentList{ + machineDeplopymentList := &v1alpha2.MachineDeploymentList{ TypeMeta: metav1.TypeMeta{ Kind: "MachineDeploymentList", }, - Items: []v1alpha1.MachineDeployment{machineDeployment}, + Items: []v1alpha2.MachineDeployment{machineDeployment}, } - ms1 := v1alpha1.MachineSet{ + ms1 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -68,11 +68,11 @@ func TestMachineSetToDeployments(t *testing.T) { *metav1.NewControllerRef(&machineDeployment, controllerKind), }, Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, } - ms2 := v1alpha1.MachineSet{ + ms2 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -80,11 +80,11 @@ func TestMachineSetToDeployments(t *testing.T) { Name: "noOwnerRefNoLabels", Namespace: "test", Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, } - ms3 := v1alpha1.MachineSet{ + ms3 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -93,13 +93,13 @@ func TestMachineSetToDeployments(t *testing.T) { Namespace: "test", Labels: map[string]string{ "foo": "bar", - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, } testsCases := []struct { - machineSet v1alpha1.MachineSet + machineSet v1alpha2.MachineSet mapObject handler.MapObject expected []reconcile.Request }{ @@ -131,9 +131,9 @@ func TestMachineSetToDeployments(t *testing.T) { }, } - v1alpha1.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) r := &ReconcileMachineDeployment{ - Client: fake.NewFakeClient(&ms1, &ms2, &ms3, machineDeplopymentList), + Client: fake.NewFakeClient(machineDeplopymentList), scheme: scheme.Scheme, recorder: record.NewFakeRecorder(32), } @@ -147,12 +147,12 @@ func TestMachineSetToDeployments(t *testing.T) { } func TestGetMachineDeploymentsForMachineSet(t *testing.T) { - machineDeployment := v1alpha1.MachineDeployment{ + machineDeployment := v1alpha2.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withLabels", Namespace: "test", }, - Spec: v1alpha1.MachineDeploymentSpec{ + Spec: v1alpha2.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -160,15 +160,15 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) { }, }, } - machineDeplopymentList := &v1alpha1.MachineDeploymentList{ + machineDeplopymentList := &v1alpha2.MachineDeploymentList{ TypeMeta: metav1.TypeMeta{ Kind: "MachineDeploymentList", }, - Items: []v1alpha1.MachineDeployment{ + Items: []v1alpha2.MachineDeployment{ machineDeployment, }, } - ms1 := v1alpha1.MachineSet{ + ms1 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -177,7 +177,7 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) { Namespace: "test", }, } - ms2 := v1alpha1.MachineSet{ + ms2 := 
v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -191,9 +191,9 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) { } testCases := []struct { - machineDeploymentList v1alpha1.MachineDeploymentList - machineSet v1alpha1.MachineSet - expected []*v1alpha1.MachineDeployment + machineDeploymentList v1alpha2.MachineDeploymentList + machineSet v1alpha2.MachineSet + expected []*v1alpha2.MachineDeployment }{ { machineDeploymentList: *machineDeplopymentList, @@ -203,10 +203,10 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) { { machineDeploymentList: *machineDeplopymentList, machineSet: ms2, - expected: []*v1alpha1.MachineDeployment{&machineDeployment}, + expected: []*v1alpha2.MachineDeployment{&machineDeployment}, }, } - v1alpha1.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) r := &ReconcileMachineDeployment{ Client: fake.NewFakeClient(&ms1, &ms2, machineDeplopymentList), scheme: scheme.Scheme, @@ -222,13 +222,13 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) { } func TestGetMachineSetsForDeployment(t *testing.T) { - machineDeployment1 := v1alpha1.MachineDeployment{ + machineDeployment1 := v1alpha2.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withMatchingOwnerRefAndLabels", Namespace: "test", UID: "UID", }, - Spec: v1alpha1.MachineDeploymentSpec{ + Spec: v1alpha2.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -236,13 +236,13 @@ func TestGetMachineSetsForDeployment(t *testing.T) { }, }, } - machineDeployment2 := v1alpha1.MachineDeployment{ + machineDeployment2 := v1alpha2.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: "withNoMatchingOwnerRef", Namespace: "test", UID: "unMatchingUID", }, - Spec: v1alpha1.MachineDeploymentSpec{ + Spec: v1alpha2.MachineDeploymentSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar2", @@ -251,7 +251,7 @@ func TestGetMachineSetsForDeployment(t *testing.T) { }, } - ms1 := v1alpha1.MachineSet{ + ms1 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -263,7 +263,7 @@ func TestGetMachineSetsForDeployment(t *testing.T) { }, }, } - ms2 := v1alpha1.MachineSet{ + ms2 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -278,7 +278,7 @@ func TestGetMachineSetsForDeployment(t *testing.T) { }, }, } - ms3 := v1alpha1.MachineSet{ + ms3 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -290,7 +290,7 @@ func TestGetMachineSetsForDeployment(t *testing.T) { }, }, } - ms4 := v1alpha1.MachineSet{ + ms4 := v1alpha2.MachineSet{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSet", }, @@ -302,11 +302,11 @@ func TestGetMachineSetsForDeployment(t *testing.T) { }, }, } - machineSetList := &v1alpha1.MachineSetList{ + machineSetList := &v1alpha2.MachineSetList{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSetList", }, - Items: []v1alpha1.MachineSet{ + Items: []v1alpha2.MachineSet{ ms1, ms2, ms3, @@ -315,20 +315,20 @@ func TestGetMachineSetsForDeployment(t *testing.T) { } testCases := []struct { - machineDeployment v1alpha1.MachineDeployment - expected []*v1alpha1.MachineSet + machineDeployment v1alpha2.MachineDeployment + expected []*v1alpha2.MachineSet }{ { machineDeployment: machineDeployment1, - expected: []*v1alpha1.MachineSet{&ms2, &ms3}, + expected: []*v1alpha2.MachineSet{&ms2, &ms3}, }, { machineDeployment: machineDeployment2, - expected: []*v1alpha1.MachineSet{&ms1}, + expected: []*v1alpha2.MachineSet{&ms1}, }, } - 
v1alpha1.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) r := &ReconcileMachineDeployment{ Client: fake.NewFakeClient(machineSetList), scheme: scheme.Scheme, diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_suite_test.go index 2e16cc1a8a..765dc356b0 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_suite_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_suite_test.go @@ -22,21 +22,26 @@ import ( "path/filepath" "testing" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var cfg *rest.Config func TestMain(m *testing.M) { t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + CRDs: []*apiextensionsv1beta1.CustomResourceDefinition{ + external.TestGenericInfrastructureCRD, + external.TestGenericInfrastructureTemplateCRD, + }, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, } - apis.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) var err error if cfg, err = t.Start(); err != nil { @@ -48,20 +53,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. -func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request, chan error) { - requests := make(chan reconcile.Request) - errors := make(chan error) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - errors <- err - return result, err - }) - return fn, requests, errors -} - // StartTestManager adds recFn func StartTestManager(mgr manager.Manager, t *testing.T) chan struct{} { t.Helper() diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_test.go index a573d258b3..1052aaec32 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/machinedeployment_reconciler_test.go @@ -17,307 +17,229 @@ limitations under the License. package machinedeployment import ( - "strconv" "testing" "time" + . 
"github.com/onsi/gomega" "golang.org/x/net/context" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -const ( - timeout = time.Second * 5 - pollingInterval = 10 * time.Millisecond -) - -var ( - c client.Client - expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} -) +const timeout = time.Second * 10 func TestReconcile(t *testing.T) { + RegisterTestingT(t) + ctx := context.TODO() + labels := map[string]string{"foo": "bar"} - deployment := &clusterv1alpha1.MachineDeployment{ + version := "1.10.3" + deployment := &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: clusterv1alpha1.MachineDeploymentSpec{ - MinReadySeconds: int32Ptr(0), - Replicas: int32Ptr(2), - RevisionHistoryLimit: int32Ptr(0), + Spec: clusterv1.MachineDeploymentSpec{ + MinReadySeconds: pointer.Int32Ptr(0), + Replicas: pointer.Int32Ptr(2), + RevisionHistoryLimit: pointer.Int32Ptr(0), Selector: metav1.LabelSelector{MatchLabels: labels}, - Strategy: &clusterv1alpha1.MachineDeploymentStrategy{ - Type: common.RollingUpdateMachineDeploymentStrategyType, - RollingUpdate: &clusterv1alpha1.MachineRollingUpdateDeployment{ - MaxUnavailable: intstrPtr(0), - MaxSurge: intstrPtr(1), + Strategy: &clusterv1.MachineDeploymentStrategy{ + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ + MaxUnavailable: intOrStrPtr(0), + MaxSurge: intOrStrPtr(1), }, }, - Template: clusterv1alpha1.MachineTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ Labels: labels, }, - Spec: clusterv1alpha1.MachineSpec{ - Versions: clusterv1alpha1.MachineVersionInfo{Kubelet: "1.10.3"}, + Spec: clusterv1.MachineSpec{ + Version: &version, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureMachineTemplate", + Name: "foo-template", + }, }, }, }, } + msListOpts := []client.ListOption{ + client.InNamespace("default"), + client.MatchingLabels(labels), + } // Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a // channel when it is finished. - mgr, err := manager.New(cfg, manager.Options{}) - if err != nil { - t.Errorf("error creating new manager: %v", err) - } - c = mgr.GetClient() + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) + Expect(err).To(BeNil()) + c := mgr.GetClient() r := newReconciler(mgr) - recFn, requests, errors := SetupTestReconcile(r) - if err := add(mgr, recFn, r.MachineSetToDeployments); err != nil { - t.Errorf("error adding controller to manager: %v", err) - } + Expect(add(mgr, r, r.MachineSetToDeployments)).To(BeNil()) defer close(StartTestManager(mgr, t)) - // Create the MachineDeployment object and expect Reconcile to be called. 
- if err := c.Create(context.TODO(), deployment); err != nil { - t.Errorf("error creating instance: %v", err) + // Create infrastructure template resource. + infraResource := map[string]interface{}{ + "kind": "InfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{}, + "spec": map[string]interface{}{ + "size": "3xlarge", + }, } - defer c.Delete(context.TODO(), deployment) - expectReconcile(t, requests, errors) + infraTmpl := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": infraResource, + }, + }, + } + infraTmpl.SetKind("InfrastructureMachineTemplate") + infraTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha2") + infraTmpl.SetName("foo-template") + infraTmpl.SetNamespace("default") + Expect(c.Create(ctx, infraTmpl)).To(BeNil()) + + // Create the MachineDeployment object and expect Reconcile to be called. + Expect(c.Create(ctx, deployment)).To(BeNil()) + defer c.Delete(ctx, deployment) // Verify that the MachineSet was created. - machineSets := &clusterv1alpha1.MachineSetList{} - expectInt(t, 1, func(ctx context.Context) int { - if err := c.List(ctx, &client.ListOptions{}, machineSets); err != nil { + machineSets := &clusterv1.MachineSetList{} + Eventually(func() int { + if err := c.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) - }) - - ms := machineSets.Items[0] - if r := *ms.Spec.Replicas; r != 2 { - t.Errorf("replicas was %d not 2", r) - } - if k := ms.Spec.Template.Spec.Versions.Kubelet; k != "1.10.3" { - t.Errorf("kubelet was %q not '1.10.3'", k) - } - - // Delete a MachineSet and expect Reconcile to be called to replace it. - if err := c.Delete(context.TODO(), &ms); err != nil { - t.Errorf("error deleting machineset: %v", err) - } - expectReconcile(t, requests, errors) - expectInt(t, 1, func(ctx context.Context) int { - if err := c.List(ctx, &client.ListOptions{}, machineSets); err != nil { - return -1 + }, timeout).Should(BeEquivalentTo(1)) + + firstMachineSet := machineSets.Items[0] + Expect(*firstMachineSet.Spec.Replicas).To(BeEquivalentTo(2)) + Expect(*firstMachineSet.Spec.Template.Spec.Version).To(BeEquivalentTo("1.10.3")) + + // + // Delete firstMachineSet and expect Reconcile to be called to replace it. + // + Expect(c.Delete(ctx, &firstMachineSet)).To(BeNil()) + Eventually(func() bool { + if err := c.List(ctx, machineSets, msListOpts...); err != nil { + return false } - return len(machineSets.Items) - }) - - // Scale a MachineDeployment and expect Reconcile to be called - if err := updateMachineDeployment(c, deployment, func(d *clusterv1alpha1.MachineDeployment) { d.Spec.Replicas = int32Ptr(5) }); err != nil { + for _, ms := range machineSets.Items { + if ms.UID == firstMachineSet.UID { + return false + } + } + return len(machineSets.Items) > 0 + }, timeout).Should(BeTrue()) + + // + // Scale the MachineDeployment and expect Reconcile to be called. 
+ // + secondMachineSet := machineSets.Items[0] + if err := updateMachineDeployment(c, deployment, func(d *clusterv1.MachineDeployment) { d.Spec.Replicas = pointer.Int32Ptr(5) }); err != nil { t.Errorf("error scaling machinedeployment: %v", err) } - if err := c.Update(context.TODO(), deployment); err != nil { - t.Errorf("error updating instance: %v", err) - } - expectReconcile(t, requests, errors) - expectInt(t, 5, func(ctx context.Context) int { - if err := c.List(ctx, &client.ListOptions{}, machineSets); err != nil { + Eventually(func() int { + key := client.ObjectKey{Name: secondMachineSet.Name, Namespace: secondMachineSet.Namespace} + if err := c.Get(ctx, key, &secondMachineSet); err != nil { return -1 } - if len(machineSets.Items) != 1 { - return -1 - } - return int(*machineSets.Items[0].Spec.Replicas) - }) + return int(*secondMachineSet.Spec.Replicas) + }, timeout).Should(BeEquivalentTo(5)) - // Update a MachineDeployment, expect Reconcile to be called and a new MachineSet to appear - if err := updateMachineDeployment(c, deployment, func(d *clusterv1alpha1.MachineDeployment) { d.Spec.Template.Labels["updated"] = "true" }); err != nil { + // + // Update a MachineDeployment, expect Reconcile to be called and a new MachineSet to appear. + // + if err := updateMachineDeployment(c, deployment, func(d *clusterv1.MachineDeployment) { d.Spec.Template.Labels["updated"] = "true" }); err != nil { t.Errorf("error scaling machinedeployment: %v", err) } - if err := c.Update(context.TODO(), deployment); err != nil { - t.Errorf("error updating instance: %v", err) - } - expectReconcile(t, requests, errors) - expectInt(t, 2, func(ctx context.Context) int { - if err := c.List(ctx, &client.ListOptions{}, machineSets); err != nil { + Eventually(func() int { + if err := c.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) - }) - - // Wait for the new MachineSet to get scaled up and set .Status.Replicas and .Status.AvailableReplicas - // at each step - var newMachineSet, oldMachineSet *clusterv1alpha1.MachineSet - resourceVersion0, err0 := strconv.Atoi(machineSets.Items[0].ResourceVersion) - resourceVersion1, err1 := strconv.Atoi(machineSets.Items[1].ResourceVersion) - if err0 != nil { - t.Fatalf("Unable to convert MS %q ResourceVersion to a number: %v", machineSets.Items[0].Name, err0) - } - if err1 != nil { - t.Fatalf("Unable to convert MS %q ResourceVersion to a number: %v", machineSets.Items[1].Name, err1) - } - - if resourceVersion0 > resourceVersion1 { - newMachineSet = &machineSets.Items[0] - oldMachineSet = &machineSets.Items[1] - } else { - newMachineSet = &machineSets.Items[1] - oldMachineSet = &machineSets.Items[0] + }, timeout).Should(BeEquivalentTo(2)) + + // + // Wait for the new MachineSet to get scaled up and set .Status.Replicas and .Status.AvailableReplicas at each step. + // + var newMachineSet, oldMachineSet *clusterv1.MachineSet + for _, ms := range machineSets.Items { + if ms.UID == secondMachineSet.UID { + oldMachineSet = ms.DeepCopy() + } else { + newMachineSet = ms.DeepCopy() + } } - // Start off by setting .Status.Replicas and .Status.AvailableReplicas of the old MachineSet + // Start off by setting .Status.Replicas and .Status.AvailableReplicas of the old MachineSet. 
+ oldMachineSetPatch := client.MergeFrom(oldMachineSet.DeepCopy()) oldMachineSet.Status.AvailableReplicas = *oldMachineSet.Spec.Replicas oldMachineSet.Status.Replicas = *oldMachineSet.Spec.Replicas - if err := c.Status().Update(context.TODO(), oldMachineSet); err != nil { - t.Errorf("error updating machineset: %v", err) - } - expectReconcile(t, requests, errors) + Expect(c.Status().Patch(ctx, oldMachineSet, oldMachineSetPatch)).To(BeNil()) - // Iterate over the scalesteps + // Iterate over the scalesteps. step := 1 for step < 5 { // Wait for newMachineSet to be scaled up - expectTrue(t, func(ctx context.Context) bool { - if err = c.Get(ctx, types.NamespacedName{ - Namespace: newMachineSet.Namespace, Name: newMachineSet.Name}, newMachineSet); err != nil { + Eventually(func() bool { + key := types.NamespacedName{Namespace: newMachineSet.Namespace, Name: newMachineSet.Name} + if err := c.Get(ctx, key, newMachineSet); err != nil { return false } currentReplicas := int(*newMachineSet.Spec.Replicas) step = currentReplicas return currentReplicas >= step && currentReplicas < 6 - }) + }, timeout).Should(BeTrue()) + newMachineSetPatch := client.MergeFrom(newMachineSet.DeepCopy()) newMachineSet.Status.Replicas = *newMachineSet.Spec.Replicas newMachineSet.Status.AvailableReplicas = *newMachineSet.Spec.Replicas - if err := c.Status().Update(context.TODO(), newMachineSet); err != nil { - t.Logf("error updating machineset: %v", err) - } - expectReconcile(t, requests, errors) + Expect(c.Status().Patch(ctx, newMachineSet, newMachineSetPatch)).To(BeNil()) // Wait for oldMachineSet to be scaled down updateOldMachineSet := true - expectTrue(t, func(ctx context.Context) bool { - if err := c.Get(ctx, types.NamespacedName{Namespace: oldMachineSet.Namespace, Name: oldMachineSet.Name}, oldMachineSet); err != nil { + Eventually(func() bool { + key := types.NamespacedName{Namespace: oldMachineSet.Namespace, Name: oldMachineSet.Name} + if err := c.Get(ctx, key, oldMachineSet); err != nil { if apierrors.IsNotFound(err) { updateOldMachineSet = false return true } return false } - return int(*oldMachineSet.Spec.Replicas) <= 5-step - }) + return int(*oldMachineSet.Spec.Replicas) <= 5-step + }, timeout).Should(BeTrue()) if updateOldMachineSet { + oldMachineSetPatch := client.MergeFrom(oldMachineSet.DeepCopy()) oldMachineSet.Status.Replicas = *oldMachineSet.Spec.Replicas oldMachineSet.Status.AvailableReplicas = *oldMachineSet.Spec.Replicas oldMachineSet.Status.ObservedGeneration = oldMachineSet.Generation - if err := c.Status().Update(context.TODO(), oldMachineSet); err != nil { - t.Logf("error updating machineset: %v", err) - } - expectReconcile(t, requests, errors) + Expect(c.Status().Patch(ctx, oldMachineSet, oldMachineSetPatch)).To(BeNil()) } } - // Expect the old MachineSet to be removed - expectInt(t, 1, func(ctx context.Context) int { - if err := c.List(ctx, &client.ListOptions{}, machineSets); err != nil { + Eventually(func() int { + if err := c.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) - }) + }, timeout).Should(BeEquivalentTo(1)) } -func int32Ptr(i int32) *int32 { - return &i -} - -func intstrPtr(i int32) *intstr.IntOrString { +func intOrStrPtr(i int32) *intstr.IntOrString { // FromInt takes an int that must not be greater than int32... 
intstr := intstr.FromInt(int(i)) return &intstr } - -func expectReconcile(t *testing.T, requests chan reconcile.Request, errors chan error) { - t.Helper() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - -LOOP: - for range time.Tick(pollingInterval) { - select { - case recv := <-requests: - if recv == expectedRequest { - break LOOP - } - case <-ctx.Done(): - t.Fatalf("timed out waiting reconcile request") - } - } - - for range time.Tick(pollingInterval) { - select { - case err := <-errors: - if err == nil { - return - } - case <-ctx.Done(): - t.Fatalf("timed out waiting reconcile error") - } - } -} - -func expectInt(t *testing.T, expect int, fn func(context.Context) int) { - t.Helper() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - for range time.Tick(pollingInterval) { - intCh := make(chan int) - go func() { intCh <- fn(ctx) }() - - select { - case n := <-intCh: - if n == expect { - return - } - case <-ctx.Done(): - t.Fatalf("timed out waiting for value: %d", expect) - return - } - } -} - -func expectTrue(t *testing.T, fn func(context.Context) bool) { - t.Helper() - - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() - - for range time.Tick(pollingInterval) { - boolCh := make(chan bool) - go func() { boolCh <- fn(ctx) }() - - select { - case n := <-boolCh: - if n { - return - } - case <-ctx.Done(): - t.Fatal("timed out waiting for condition") - return - } - } -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go index a12c13bf00..716364d026 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go @@ -21,14 +21,14 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/integer" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "k8s.io/utils/integer" + "sigs.k8s.io/cluster-api/api/v1alpha2" dutil "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util" ) // rolloutRolling implements the logic for rolling a new machine set. 
-func (r *ReconcileMachineDeployment) rolloutRolling(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet, machineMap map[types.UID]*v1alpha1.MachineList) error { +func (r *ReconcileMachineDeployment) rolloutRolling(d *v1alpha2.MachineDeployment, msList []*v1alpha2.MachineSet, machineMap map[types.UID]*v1alpha2.MachineList) error { newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(d, msList, machineMap, true) if err != nil { return err @@ -70,7 +70,7 @@ func (r *ReconcileMachineDeployment) rolloutRolling(d *v1alpha1.MachineDeploymen return nil } -func (r *ReconcileMachineDeployment) reconcileNewMachineSet(allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) reconcileNewMachineSet(allMSs []*v1alpha2.MachineSet, newMS *v1alpha2.MachineSet, deployment *v1alpha2.MachineDeployment) error { if deployment.Spec.Replicas == nil { return errors.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) } @@ -98,13 +98,15 @@ func (r *ReconcileMachineDeployment) reconcileNewMachineSet(allMSs []*v1alpha1.M return err } -func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha2.MachineSet, oldMSs []*v1alpha2.MachineSet, newMS *v1alpha2.MachineSet, deployment *v1alpha2.MachineDeployment) error { if deployment.Spec.Replicas == nil { - return errors.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) + return errors.Errorf("spec replicas for MachineDeployment %q/%q is nil, this is unexpected", + deployment.Namespace, deployment.Name) } if newMS.Spec.Replicas == nil { - return errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name) + return errors.Errorf("spec replicas for MachineSet %q/%q is nil, this is unexpected", + newMS.Namespace, newMS.Name) } oldMachinesCount := dutil.GetReplicaCountForMachineSets(oldMSs) @@ -175,7 +177,7 @@ func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha1. } // cleanupUnhealthyReplicas will scale down old machine sets with unhealthy replicas, so that all unhealthy replicas will be deleted. -func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment, maxCleanupCount int32) ([]*v1alpha1.MachineSet, int32, error) { +func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(oldMSs []*v1alpha2.MachineSet, deployment *v1alpha2.MachineDeployment, maxCleanupCount int32) ([]*v1alpha2.MachineSet, int32, error) { sort.Sort(dutil.MachineSetsByCreationTimestamp(oldMSs)) // Safely scale down all old machine sets with unhealthy replicas. Replica set will sort the machines in the order @@ -226,7 +228,7 @@ func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(oldMSs []*v1alpha1 // scaleDownOldMachineSetsForRollingUpdate scales down old machine sets when deployment strategy is "RollingUpdate". 
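reconcileOldMachineSets above only removes old machines while the rollout stays within the availability budget (see the maxUnavailable check noted below). A minimal arithmetic sketch of that guard, with maxScaleDown as an invented helper name and the inputs taken as plain counts rather than the controller's objects:

```go
// maxScaleDown returns how many machines may be removed from the old
// MachineSets without dropping below desired-maxUnavailable ready machines.
func maxScaleDown(allMachines, desired, maxUnavailable, newMSReplicas, newMSAvailable int32) int32 {
	minAvailable := desired - maxUnavailable
	// Machines of the new MachineSet that are not yet available still count
	// against the budget: deleting old machines will not make them ready.
	newMSUnavailable := newMSReplicas - newMSAvailable
	n := allMachines - minAvailable - newMSUnavailable
	if n < 0 {
		return 0
	}
	return n
}

// e.g. maxScaleDown(10, 10, 2, 3, 1) == 0: two not-yet-available new machines
// consume the whole maxUnavailable budget, so nothing old may be deleted yet.
```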
// Need check maxUnavailable to ensure availability -func (r *ReconcileMachineDeployment) scaleDownOldMachineSetsForRollingUpdate(allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (int32, error) { +func (r *ReconcileMachineDeployment) scaleDownOldMachineSetsForRollingUpdate(allMSs []*v1alpha2.MachineSet, oldMSs []*v1alpha2.MachineSet, deployment *v1alpha2.MachineDeployment) (int32, error) { if deployment.Spec.Replicas == nil { return 0, errors.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go index 6fe0be7760..b5092baa8d 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go @@ -33,14 +33,14 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/retry" "k8s.io/klog" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" dutil "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util" "sigs.k8s.io/controller-runtime/pkg/client" ) // sync is responsible for reconciling deployments on scaling events or when they // are paused. -func (r *ReconcileMachineDeployment) sync(d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet, machineMap map[types.UID]*clusterv1alpha1.MachineList) error { +func (r *ReconcileMachineDeployment) sync(d *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, machineMap map[types.UID]*clusterv1.MachineList) error { newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(d, msList, machineMap, false) if err != nil { return err @@ -71,7 +71,7 @@ func (r *ReconcileMachineDeployment) sync(d *clusterv1alpha1.MachineDeployment, // // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of machine sets, thus incorrect deployment status. -func (r *ReconcileMachineDeployment) getAllMachineSetsAndSyncRevision(d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet, machineMap map[types.UID]*clusterv1alpha1.MachineList, createIfNotExisted bool) (*clusterv1alpha1.MachineSet, []*clusterv1alpha1.MachineSet, error) { +func (r *ReconcileMachineDeployment) getAllMachineSetsAndSyncRevision(d *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, machineMap map[types.UID]*clusterv1.MachineList, createIfNotExisted bool) (*clusterv1.MachineSet, []*clusterv1.MachineSet, error) { _, allOldMSs := dutil.FindOldMachineSets(d, msList) // Get new machine set with the updated revision number @@ -88,7 +88,7 @@ func (r *ReconcileMachineDeployment) getAllMachineSetsAndSyncRevision(d *cluster // 2. If there's existing new MS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old MSes. // 3. If there's no existing new MS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the machine-template-hash will be added to adopted MSes and machines. 
-func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.MachineDeployment, msList, oldMSs []*clusterv1alpha1.MachineSet, createIfNotExisted bool) (*clusterv1alpha1.MachineSet, error) { +func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1.MachineDeployment, msList, oldMSs []*clusterv1.MachineSet, createIfNotExisted bool) (*clusterv1.MachineSet, error) { existingNewMS := dutil.FindNewMachineSet(d, msList) // Calculate the max revision number among all old MSes @@ -114,7 +114,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine } // Apply revision annotation from existingNewMS if it is missing from the deployment. - err := r.updateMachineDeployment(d, func(innerDeployment *clusterv1alpha1.MachineDeployment) { + err := r.updateMachineDeployment(d, func(innerDeployment *clusterv1.MachineDeployment) { dutil.SetDeploymentRevision(d, msCopy.Annotations[dutil.RevisionAnnotation]) }) return msCopy, err @@ -140,7 +140,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine } // Create new MachineSet - newMS := clusterv1alpha1.MachineSet{ + newMS := clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ // Make the name deterministic, to ensure idempotence Name: d.Name + "-" + apirand.SafeEncodeString(machineTemplateSpecHash), @@ -148,7 +148,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine Labels: newMSTemplate.Labels, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, }, - Spec: clusterv1alpha1.MachineSetSpec{ + Spec: clusterv1.MachineSetSpec{ Replicas: new(int32), MinReadySeconds: minReadySeconds, Selector: *newMSSelector, @@ -182,7 +182,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine case apierrors.IsAlreadyExists(err): alreadyExists = true - ms := &clusterv1alpha1.MachineSet{} + ms := &clusterv1.MachineSet{} msErr := r.Get(context.Background(), client.ObjectKey{Namespace: newMS.Namespace, Name: newMS.Name}, ms) if msErr != nil { return nil, msErr @@ -210,7 +210,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine r.recorder.Eventf(d, corev1.EventTypeNormal, "SuccessfulCreate", "Created MachineSet %q", newMS.Name) } - err = r.updateMachineDeployment(d, func(innerDeployment *clusterv1alpha1.MachineDeployment) { + err = r.updateMachineDeployment(d, func(innerDeployment *clusterv1.MachineDeployment) { dutil.SetDeploymentRevision(d, newRevision) }) @@ -222,7 +222,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. 
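getNewMachineSet above keeps MachineSet creation idempotent by deriving the name from the deployment name plus a hash of the machine template. A rough sketch of that idea; deterministicMSName is an invented helper, and the plain %#v formatting here only stands in for the spew-based DeepHashObject the vendored util package uses.

```go
package sketch

import (
	"fmt"
	"hash/fnv"

	apirand "k8s.io/apimachinery/pkg/util/rand"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

// deterministicMSName hashes the machine template so reconciling the same
// MachineDeployment twice yields the same MachineSet name.
func deterministicMSName(deploymentName string, template clusterv1.MachineTemplateSpec) string {
	hasher := fnv.New32a()
	fmt.Fprintf(hasher, "%#v", template) // simplified stand-in for DeepHashObject
	return deploymentName + "-" + apirand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}
```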
-func (r *ReconcileMachineDeployment) scale(deployment *clusterv1alpha1.MachineDeployment, newMS *clusterv1alpha1.MachineSet, oldMSs []*clusterv1alpha1.MachineSet) error { +func (r *ReconcileMachineDeployment) scale(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) error { if deployment.Spec.Replicas == nil { return errors.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name) } @@ -333,18 +333,19 @@ func (r *ReconcileMachineDeployment) scale(deployment *clusterv1alpha1.MachineDe } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary -func (r *ReconcileMachineDeployment) syncDeploymentStatus(allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha1.MachineSet, d *clusterv1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) syncDeploymentStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, d *clusterv1.MachineDeployment) error { newStatus := calculateStatus(allMSs, newMS, d) if reflect.DeepEqual(d.Status, newStatus) { return nil } + patch := client.MergeFrom(d.DeepCopy()) d.Status = newStatus - return r.Status().Update(context.Background(), d) + return r.Status().Patch(context.Background(), d, patch) } // calculateStatus calculates the latest status for the provided deployment by looking into the provided machine sets. -func calculateStatus(allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) clusterv1alpha1.MachineDeploymentStatus { +func calculateStatus(allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) clusterv1.MachineDeploymentStatus { availableReplicas := dutil.GetAvailableReplicaCountForMachineSets(allMSs) totalReplicas := dutil.GetReplicaCountForMachineSets(allMSs) unavailableReplicas := totalReplicas - availableReplicas @@ -355,11 +356,11 @@ func calculateStatus(allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha unavailableReplicas = 0 } - status := clusterv1alpha1.MachineDeploymentStatus{ + status := clusterv1.MachineDeploymentStatus{ // TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value. 
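syncDeploymentStatus above switches from Status().Update to a MergeFrom patch: snapshot the object, mutate it, and send only the diff. The same shape as an isolated sketch; patchMachineSetStatus is an invented helper, not part of the patch.

```go
package sketch

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patchMachineSetStatus records the pre-mutation state, updates status fields,
// and issues a merge patch so only the changed fields travel to the API server.
func patchMachineSetStatus(ctx context.Context, c client.Client, ms *clusterv1.MachineSet, ready int32) error {
	base := client.MergeFrom(ms.DeepCopy()) // snapshot before mutating
	ms.Status.Replicas = ready
	ms.Status.ReadyReplicas = ready
	ms.Status.AvailableReplicas = ready
	return c.Status().Patch(ctx, ms, base)
}
```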
ObservedGeneration: deployment.Generation, Replicas: dutil.GetActualReplicaCountForMachineSets(allMSs), - UpdatedReplicas: dutil.GetActualReplicaCountForMachineSets([]*clusterv1alpha1.MachineSet{newMS}), + UpdatedReplicas: dutil.GetActualReplicaCountForMachineSets([]*clusterv1.MachineSet{newMS}), ReadyReplicas: dutil.GetReadyReplicaCountForMachineSets(allMSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, @@ -368,7 +369,7 @@ func calculateStatus(allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha return status } -func (r *ReconcileMachineDeployment) scaleMachineSet(ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment) (bool, error) { +func (r *ReconcileMachineDeployment) scaleMachineSet(ms *clusterv1.MachineSet, newScale int32, deployment *clusterv1.MachineDeployment) (bool, error) { if ms.Spec.Replicas == nil { return false, errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name) } @@ -388,7 +389,7 @@ func (r *ReconcileMachineDeployment) scaleMachineSet(ms *clusterv1alpha1.Machine return r.scaleMachineSetOperation(ms, newScale, deployment, scalingOperation) } -func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment, scaleOperation string) (bool, error) { +func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1.MachineSet, newScale int32, deployment *clusterv1.MachineDeployment, scaleOperation string) (bool, error) { if ms.Spec.Replicas == nil { return false, errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name) } @@ -407,10 +408,12 @@ func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1alpha ) if sizeNeedsUpdate || annotationsNeedUpdate { + patch := client.MergeFrom(ms.DeepCopy()) + *(ms.Spec.Replicas) = newScale dutil.SetReplicasAnnotations(ms, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+dutil.MaxSurge(*deployment)) - err = r.Update(context.Background(), ms) + err = r.Patch(context.Background(), ms, patch) if err != nil { r.recorder.Eventf(deployment, corev1.EventTypeWarning, "FailedScale", "Failed to scale MachineSet %q: %v", ms.Name, err) } else if sizeNeedsUpdate { @@ -425,13 +428,13 @@ func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1alpha // cleanupDeployment is responsible for cleaning up a deployment i.e. retains all but the latest N old machine sets // where N=d.Spec.RevisionHistoryLimit. Old machine sets are older versions of the machinetemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. -func (r *ReconcileMachineDeployment) cleanupDeployment(oldMSs []*clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) cleanupDeployment(oldMSs []*clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } // Avoid deleting machine set with deletion timestamp set - aliveFilter := func(ms *clusterv1alpha1.MachineSet) bool { + aliveFilter := func(ms *clusterv1.MachineSet) bool { return ms != nil && ms.ObjectMeta.DeletionTimestamp == nil } @@ -474,7 +477,7 @@ func (r *ReconcileMachineDeployment) cleanupDeployment(oldMSs []*clusterv1alpha1 // // msList should come from getMachineSetsForDeployment(d). 
// machineMap should come from getMachineMapForDeployment(d, msList). -func (r *ReconcileMachineDeployment) isScalingEvent(d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet, machineMap map[types.UID]*clusterv1alpha1.MachineList) (bool, error) { +func (r *ReconcileMachineDeployment) isScalingEvent(d *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet, machineMap map[types.UID]*clusterv1.MachineList) (bool, error) { if d.Spec.Replicas == nil { return false, errors.Errorf("spec replicas for deployment %v is nil, this is unexpected", d.Name) } @@ -495,12 +498,12 @@ func (r *ReconcileMachineDeployment) isScalingEvent(d *clusterv1alpha1.MachineDe return false, nil } -func (r *ReconcileMachineDeployment) updateMachineDeployment(d *clusterv1alpha1.MachineDeployment, modify func(*clusterv1alpha1.MachineDeployment)) error { +func (r *ReconcileMachineDeployment) updateMachineDeployment(d *clusterv1.MachineDeployment, modify func(*clusterv1.MachineDeployment)) error { return updateMachineDeployment(r.Client, d, modify) } // We have this as standalone variant to be able to use it from the tests -func updateMachineDeployment(c client.Client, d *clusterv1alpha1.MachineDeployment, modify func(*clusterv1alpha1.MachineDeployment)) error { +func updateMachineDeployment(c client.Client, d *clusterv1.MachineDeployment, modify func(*clusterv1.MachineDeployment)) error { dCopy := d.DeepCopy() modify(dCopy) if equality.Semantic.DeepEqual(dCopy, d) { @@ -511,11 +514,13 @@ func updateMachineDeployment(c client.Client, d *clusterv1alpha1.MachineDeployme if err := c.Get(context.Background(), types.NamespacedName{Namespace: d.Namespace, Name: d.Name}, d); err != nil { return err } + // Save patch. + patch := client.MergeFrom(d.DeepCopy()) // Apply defaults. - clusterv1alpha1.PopulateDefaultsMachineDeployment(d) + clusterv1.PopulateDefaultsMachineDeployment(d) // Apply modifications. modify(d) - // Update the MachineDeployment. - return c.Update(context.Background(), d) + // Patch the MachineDeployment. 
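updateMachineDeployment above wraps a fresh Get, defaulting, the caller's mutation, and a MergeFrom patch inside retry.RetryOnConflict. A condensed sketch of that shape (updateWithRetry is an invented name, and the PopulateDefaultsMachineDeployment call is omitted here):

```go
package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// updateWithRetry re-reads the MachineDeployment on every attempt so the patch
// is always computed against the latest revision, then retries on conflicts.
func updateWithRetry(ctx context.Context, c client.Client, d *clusterv1.MachineDeployment, modify func(*clusterv1.MachineDeployment)) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		if err := c.Get(ctx, types.NamespacedName{Namespace: d.Namespace, Name: d.Name}, d); err != nil {
			return err
		}
		patch := client.MergeFrom(d.DeepCopy())
		modify(d)
		return c.Patch(ctx, d, patch)
	})
}
```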
+ return c.Patch(context.Background(), d, patch) }) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/BUILD.bazel index 28abccce0a..d435210da4 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/BUILD.bazel @@ -6,8 +6,7 @@ go_library( importpath = "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", @@ -15,8 +14,8 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/integer:go_default_library", ], ) @@ -25,15 +24,10 @@ go_test( srcs = ["util_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/fake:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", + "//api/v1alpha2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", - "//vendor/k8s.io/client-go/testing:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util.go index 6cbc825e67..730b076c37 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util.go @@ -31,10 +31,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" intstrutil "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/util/integer" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "k8s.io/utils/integer" + "sigs.k8s.io/cluster-api/api/v1alpha2" ) const ( @@ -72,7 +71,7 @@ const ( ) // MachineSetsByCreationTimestamp sorts a list of MachineSet by creation timestamp, using their names as a tie breaker. -type MachineSetsByCreationTimestamp []*v1alpha1.MachineSet +type MachineSetsByCreationTimestamp []*v1alpha2.MachineSet func (o MachineSetsByCreationTimestamp) Len() int { return len(o) } func (o MachineSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -85,7 +84,7 @@ func (o MachineSetsByCreationTimestamp) Less(i, j int) bool { // MachineSetsBySizeOlder sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from old to new machine sets. 
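The comparator types above (MachineSetsByCreationTimestamp and friends) give the rollout code a stable ordering. Roughly, the oldest-first variant behaves like this local sketch; byCreationTimestamp is an invented local type, not the vendored one.

```go
package sketch

import (
	"sort"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

// byCreationTimestamp sorts MachineSets oldest first, falling back to the name
// so two MachineSets created in the same second still order deterministically.
type byCreationTimestamp []*clusterv1.MachineSet

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

// usage: sort.Sort(byCreationTimestamp(machineSets))
var _ sort.Interface = byCreationTimestamp(nil)
```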
-type MachineSetsBySizeOlder []*v1alpha1.MachineSet +type MachineSetsBySizeOlder []*v1alpha2.MachineSet func (o MachineSetsBySizeOlder) Len() int { return len(o) } func (o MachineSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -98,7 +97,7 @@ func (o MachineSetsBySizeOlder) Less(i, j int) bool { // MachineSetsBySizeNewer sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from new to old machine sets. -type MachineSetsBySizeNewer []*v1alpha1.MachineSet +type MachineSetsBySizeNewer []*v1alpha2.MachineSet func (o MachineSetsBySizeNewer) Len() int { return len(o) } func (o MachineSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -110,7 +109,7 @@ func (o MachineSetsBySizeNewer) Less(i, j int) bool { } // SetDeploymentRevision updates the revision for a deployment. -func SetDeploymentRevision(deployment *v1alpha1.MachineDeployment, revision string) bool { +func SetDeploymentRevision(deployment *v1alpha2.MachineDeployment, revision string) bool { updated := false if deployment.Annotations == nil { @@ -125,7 +124,7 @@ func SetDeploymentRevision(deployment *v1alpha1.MachineDeployment, revision stri } // MaxRevision finds the highest revision in the machine sets -func MaxRevision(allMSs []*v1alpha1.MachineSet) int64 { +func MaxRevision(allMSs []*v1alpha2.MachineSet) int64 { max := int64(0) for _, ms := range allMSs { if v, err := Revision(ms); err != nil { @@ -169,7 +168,7 @@ func skipCopyAnnotation(key string) bool { // copyDeploymentAnnotationsToMachineSet copies deployment's annotations to machine set's annotations, // and returns true if machine set's annotation is changed. // Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToMachineSet(deployment *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) bool { +func copyDeploymentAnnotationsToMachineSet(deployment *v1alpha2.MachineDeployment, ms *v1alpha2.MachineSet) bool { msAnnotationsChanged := false if ms.Annotations == nil { ms.Annotations = make(map[string]string) @@ -188,15 +187,15 @@ func copyDeploymentAnnotationsToMachineSet(deployment *v1alpha1.MachineDeploymen } // GetDesiredReplicasAnnotation returns the number of desired replicas -func GetDesiredReplicasAnnotation(ms *v1alpha1.MachineSet) (int32, bool) { +func GetDesiredReplicasAnnotation(ms *v1alpha2.MachineSet) (int32, bool) { return getIntFromAnnotation(ms, DesiredReplicasAnnotation) } -func getMaxReplicasAnnotation(ms *v1alpha1.MachineSet) (int32, bool) { +func getMaxReplicasAnnotation(ms *v1alpha2.MachineSet) (int32, bool) { return getIntFromAnnotation(ms, MaxReplicasAnnotation) } -func getIntFromAnnotation(ms *v1alpha1.MachineSet, annotationKey string) (int32, bool) { +func getIntFromAnnotation(ms *v1alpha2.MachineSet, annotationKey string) (int32, bool) { annotationValue, ok := ms.Annotations[annotationKey] if !ok { return int32(0), false @@ -211,7 +210,7 @@ func getIntFromAnnotation(ms *v1alpha1.MachineSet, annotationKey string) (int32, // SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if machine set's annotation is changed. 
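getIntFromAnnotation above backs GetDesiredReplicasAnnotation and getMaxReplicasAnnotation: a missing or unparsable annotation simply reports ok=false. A standalone sketch of that parsing; int32FromAnnotation is an invented name, and the annotation key is passed in rather than assumed.

```go
package sketch

import (
	"strconv"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

// int32FromAnnotation reads an int32-valued annotation, reporting ok=false for
// a missing key or a value that does not parse.
func int32FromAnnotation(ms *clusterv1.MachineSet, key string) (int32, bool) {
	v, ok := ms.Annotations[key]
	if !ok {
		return 0, false
	}
	i, err := strconv.ParseInt(v, 10, 32)
	if err != nil {
		return 0, false
	}
	return int32(i), true
}
```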
-func SetNewMachineSetAnnotations(deployment *v1alpha1.MachineDeployment, newMS *v1alpha1.MachineSet, newRevision string, exists bool) bool { +func SetNewMachineSetAnnotations(deployment *v1alpha2.MachineDeployment, newMS *v1alpha2.MachineSet, newRevision string, exists bool) bool { // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) // Then, update machine set's revision annotation @@ -265,7 +264,7 @@ func SetNewMachineSetAnnotations(deployment *v1alpha1.MachineDeployment, newMS * // FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active // machine set. If there are more than one active machine sets, return nil so machine sets can be scaled down // to the point where there is only one active machine set. -func FindOneActiveOrLatest(newMS *v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet) *v1alpha1.MachineSet { +func FindOneActiveOrLatest(newMS *v1alpha2.MachineSet, oldMSs []*v1alpha2.MachineSet) *v1alpha2.MachineSet { if newMS == nil && len(oldMSs) == 0 { return nil } @@ -288,7 +287,7 @@ func FindOneActiveOrLatest(newMS *v1alpha1.MachineSet, oldMSs []*v1alpha1.Machin } // SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations -func SetReplicasAnnotations(ms *v1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool { +func SetReplicasAnnotations(ms *v1alpha2.MachineSet, desiredReplicas, maxReplicas int32) bool { updated := false if ms.Annotations == nil { ms.Annotations = make(map[string]string) @@ -307,7 +306,7 @@ func SetReplicasAnnotations(ms *v1alpha1.MachineSet, desiredReplicas, maxReplica } // AnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated -func ReplicasAnnotationsNeedUpdate(ms *v1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool { +func ReplicasAnnotationsNeedUpdate(ms *v1alpha2.MachineSet, desiredReplicas, maxReplicas int32) bool { if ms.Annotations == nil { return true } @@ -323,7 +322,7 @@ func ReplicasAnnotationsNeedUpdate(ms *v1alpha1.MachineSet, desiredReplicas, max } // MaxUnavailable returns the maximum unavailable machines a rolling deployment can take. -func MaxUnavailable(deployment v1alpha1.MachineDeployment) int32 { +func MaxUnavailable(deployment v1alpha2.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { return int32(0) } @@ -336,7 +335,7 @@ func MaxUnavailable(deployment v1alpha1.MachineDeployment) int32 { } // MaxSurge returns the maximum surge machines a rolling deployment can take. -func MaxSurge(deployment v1alpha1.MachineDeployment) int32 { +func MaxSurge(deployment v1alpha2.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) { return int32(0) } @@ -348,7 +347,7 @@ func MaxSurge(deployment v1alpha1.MachineDeployment) int32 { // GetProportion will estimate the proportion for the provided machine set using 1. the current size // of the parent deployment, 2. the replica count that needs be added on the machine sets of the // deployment, and 3. the total replicas added in the machine sets of the deployment so far. 
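MaxUnavailable and MaxSurge above resolve the RollingUpdate strategy's IntOrString knobs against the desired replica count (ResolveFenceposts, further down, applies the same fence-post rules). A rough equivalent as an isolated sketch, with resolveSurgeAndUnavailable as an invented helper:

```go
package sketch

import (
	"k8s.io/apimachinery/pkg/util/intstr"
)

// resolveSurgeAndUnavailable turns the RollingUpdate knobs into absolute
// machine counts: surge rounds up, unavailable rounds down, and a 0/0 result
// is nudged so the rollout cannot deadlock.
func resolveSurgeAndUnavailable(desired int32, maxSurge, maxUnavailable intstr.IntOrString) (int32, int32, error) {
	surge, err := intstr.GetValueFromIntOrPercent(&maxSurge, int(desired), true)
	if err != nil {
		return 0, 0, err
	}
	unavailable, err := intstr.GetValueFromIntOrPercent(&maxUnavailable, int(desired), false)
	if err != nil {
		return 0, 0, err
	}
	if surge == 0 && unavailable == 0 {
		unavailable = 1
	}
	return int32(surge), int32(unavailable), nil
}
```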
-func GetProportion(ms *v1alpha1.MachineSet, d v1alpha1.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { +func GetProportion(ms *v1alpha2.MachineSet, d v1alpha2.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { if ms == nil || *(ms.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { return int32(0) } @@ -370,7 +369,7 @@ func GetProportion(ms *v1alpha1.MachineSet, d v1alpha1.MachineDeployment, deploy // getMachineSetFraction estimates the fraction of replicas a machine set can have in // 1. a scaling event during a rollout or 2. when scaling a paused deployment. -func getMachineSetFraction(ms v1alpha1.MachineSet, d v1alpha1.MachineDeployment) int32 { +func getMachineSetFraction(ms v1alpha2.MachineSet, d v1alpha2.MachineDeployment) int32 { // If we are scaling down to zero then the fraction of this machine set is its whole size (negative) if *(d.Spec.Replicas) == int32(0) { return -*(ms.Spec.Replicas) @@ -397,7 +396,7 @@ func getMachineSetFraction(ms v1alpha1.MachineSet, d v1alpha1.MachineDeployment) // 1. The hash result would be different upon machineTemplateSpec API changes // (e.g. the addition of a new field will cause the hash code to change) // 2. The deployment template won't have hash labels -func EqualIgnoreHash(template1, template2 *v1alpha1.MachineTemplateSpec) bool { +func EqualIgnoreHash(template1, template2 *v1alpha2.MachineTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() // Remove hash labels from template.Labels before comparing @@ -407,7 +406,7 @@ func EqualIgnoreHash(template1, template2 *v1alpha1.MachineTemplateSpec) bool { } // FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template). -func FindNewMachineSet(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) *v1alpha1.MachineSet { +func FindNewMachineSet(deployment *v1alpha2.MachineDeployment, msList []*v1alpha2.MachineSet) *v1alpha2.MachineSet { sort.Sort(MachineSetsByCreationTimestamp(msList)) for i := range msList { if EqualIgnoreHash(&msList[i].Spec.Template, &deployment.Spec.Template) { @@ -426,9 +425,9 @@ func FindNewMachineSet(deployment *v1alpha1.MachineDeployment, msList []*v1alpha // Returns two list of machine sets // - the first contains all old machine sets with all non-zero replicas // - the second contains all old machine sets -func FindOldMachineSets(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) ([]*v1alpha1.MachineSet, []*v1alpha1.MachineSet) { - var requiredMSs []*v1alpha1.MachineSet - allMSs := make([]*v1alpha1.MachineSet, 0, len(msList)) +func FindOldMachineSets(deployment *v1alpha2.MachineDeployment, msList []*v1alpha2.MachineSet) ([]*v1alpha2.MachineSet, []*v1alpha2.MachineSet) { + var requiredMSs []*v1alpha2.MachineSet + allMSs := make([]*v1alpha2.MachineSet, 0, len(msList)) newMS := FindNewMachineSet(deployment, msList) for _, ms := range msList { // Filter out new machine set @@ -444,7 +443,7 @@ func FindOldMachineSets(deployment *v1alpha1.MachineDeployment, msList []*v1alph } // GetReplicaCountForMachineSets returns the sum of Replicas of the given machine sets. 
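EqualIgnoreHash above decides whether a MachineSet already matches the deployment's template by comparing the two specs with the hash label stripped. A sketch of that comparison; the "machine-template-hash" key is assumed here for illustration and may not match the constant the vendored code actually uses.

```go
package sketch

import (
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

// equalIgnoreHash compares machine templates semantically after removing the
// hash label the controller injects, so a re-hash alone never looks like a
// template change.
func equalIgnoreHash(a, b *clusterv1.MachineTemplateSpec) bool {
	ac, bc := a.DeepCopy(), b.DeepCopy()
	delete(ac.ObjectMeta.Labels, "machine-template-hash") // assumed label key
	delete(bc.ObjectMeta.Labels, "machine-template-hash")
	return apiequality.Semantic.DeepEqual(ac, bc)
}
```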
-func GetReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetReplicaCountForMachineSets(machineSets []*v1alpha2.MachineSet) int32 { totalReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -455,7 +454,7 @@ func GetReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { } // GetActualReplicaCountForMachineSets returns the sum of actual replicas of the given machine sets. -func GetActualReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetActualReplicaCountForMachineSets(machineSets []*v1alpha2.MachineSet) int32 { totalActualReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -466,7 +465,7 @@ func GetActualReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int } // GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. -func GetReadyReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetReadyReplicaCountForMachineSets(machineSets []*v1alpha2.MachineSet) int32 { totalReadyReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -477,7 +476,7 @@ func GetReadyReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int3 } // GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. -func GetAvailableReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetAvailableReplicaCountForMachineSets(machineSets []*v1alpha2.MachineSet) int32 { totalAvailableReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -488,13 +487,13 @@ func GetAvailableReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) } // IsRollingUpdate returns true if the strategy type is a rolling update. -func IsRollingUpdate(deployment *v1alpha1.MachineDeployment) bool { - return deployment.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType +func IsRollingUpdate(deployment *v1alpha2.MachineDeployment) bool { + return deployment.Spec.Strategy.Type == v1alpha2.RollingUpdateMachineDeploymentStrategyType } // DeploymentComplete considers a deployment to be complete once all of its desired replicas // are updated and available, and no old machines are running. -func DeploymentComplete(deployment *v1alpha1.MachineDeployment, newStatus *v1alpha1.MachineDeploymentStatus) bool { +func DeploymentComplete(deployment *v1alpha2.MachineDeployment, newStatus *v1alpha2.MachineDeploymentStatus) bool { return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && newStatus.Replicas == *(deployment.Spec.Replicas) && newStatus.AvailableReplicas == *(deployment.Spec.Replicas) && @@ -505,9 +504,9 @@ func DeploymentComplete(deployment *v1alpha1.MachineDeployment, newStatus *v1alp // When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it. 
// 1) The new MS is saturated: newMS's replicas == deployment's replicas // 2) Max number of machines allowed is reached: deployment's replicas + maxSurge == all MSs' replicas -func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet) (int32, error) { +func NewMSNewReplicas(deployment *v1alpha2.MachineDeployment, allMSs []*v1alpha2.MachineSet, newMS *v1alpha2.MachineSet) (int32, error) { switch deployment.Spec.Strategy.Type { - case common.RollingUpdateMachineDeploymentStrategyType: + case v1alpha2.RollingUpdateMachineDeploymentStrategyType: // Check if we can scale up. maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true) if err != nil { @@ -551,7 +550,7 @@ func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1 // Both the deployment and the machine set have to believe this machine set can own all of the desired // replicas in the deployment and the annotation helps in achieving that. All machines of the MachineSet // need to be available. -func IsSaturated(deployment *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) bool { +func IsSaturated(deployment *v1alpha2.MachineDeployment, ms *v1alpha2.MachineSet) bool { if ms == nil { return false } @@ -596,18 +595,18 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired } // FilterActiveMachineSets returns machine sets that have (or at least ought to have) machines. -func FilterActiveMachineSets(machineSets []*v1alpha1.MachineSet) []*v1alpha1.MachineSet { - activeFilter := func(ms *v1alpha1.MachineSet) bool { +func FilterActiveMachineSets(machineSets []*v1alpha2.MachineSet) []*v1alpha2.MachineSet { + activeFilter := func(ms *v1alpha2.MachineSet) bool { return ms != nil && ms.Spec.Replicas != nil && *(ms.Spec.Replicas) > 0 } return FilterMachineSets(machineSets, activeFilter) } -type filterMS func(ms *v1alpha1.MachineSet) bool +type filterMS func(ms *v1alpha2.MachineSet) bool // FilterMachineSets returns machine sets that are filtered by filterFn (all returned ones should match filterFn). 
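NewMSNewReplicas above caps the new MachineSet's growth by the surge budget: the total machine count may exceed the desired count by at most maxSurge, and the new MachineSet never grows past the deployment's desired replicas. The arithmetic as an isolated sketch, with newMSScaleTarget as an invented name:

```go
package sketch

import (
	"k8s.io/utils/integer"
)

// newMSScaleTarget returns the replica count the new MachineSet may be scaled
// to while keeping the total machine count within desired+maxSurge.
func newMSScaleTarget(desired, maxSurge, currentTotal, newMSReplicas int32) int32 {
	headroom := desired + maxSurge - currentTotal
	if headroom < 0 {
		headroom = 0 // already at or over the surge budget, no scale up
	}
	// Never scale the new MachineSet past what the deployment ultimately wants.
	return integer.Int32Min(newMSReplicas+headroom, desired)
}
```

For example, with desired=5, maxSurge=1, five machines already running, and the new MachineSet at 2 replicas, the target is min(2+1, 5) = 3.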
-func FilterMachineSets(MSes []*v1alpha1.MachineSet, filterFn filterMS) []*v1alpha1.MachineSet { - var filtered []*v1alpha1.MachineSet +func FilterMachineSets(MSes []*v1alpha2.MachineSet, filterFn filterMS) []*v1alpha2.MachineSet { + var filtered []*v1alpha2.MachineSet for i := range MSes { if filterFn(MSes[i]) { filtered = append(filtered, MSes[i]) @@ -686,7 +685,7 @@ func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { printer.Fprintf(hasher, "%#v", objectToWrite) } -func ComputeHash(template *v1alpha1.MachineTemplateSpec) uint32 { +func ComputeHash(template *v1alpha2.MachineTemplateSpec) uint32 { machineTemplateSpecHasher := fnv.New32a() DeepHashObject(machineTemplateSpecHasher, *template) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util_test.go index 66c1b2bace..f5b3ab1caf 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util/util_test.go @@ -25,85 +25,33 @@ import ( "testing" "time" - "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" - core "k8s.io/client-go/testing" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" + "sigs.k8s.io/cluster-api/api/v1alpha2" ) -func addListMSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { - fakeClient.AddReactor("list", "machinesets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - return true, obj, nil - }) - return fakeClient -} - -func addListMachinesReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { - fakeClient.AddReactor("list", "machines", func(action core.Action) (handled bool, ret runtime.Object, err error) { - return true, obj, nil - }) - return fakeClient -} - -func addGetMSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { - msList, ok := obj.(*v1alpha1.MachineSetList) - fakeClient.AddReactor("get", "machinesets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - name := action.(core.GetAction).GetName() - if ok { - for _, ms := range msList.Items { - if ms.Name == name { - return true, &ms, nil - } - } - } - return false, nil, errors.Errorf("could not find the requested machine set: %s", name) - - }) - return fakeClient -} - -func addUpdateMSReactor(fakeClient *fake.Clientset) *fake.Clientset { - fakeClient.AddReactor("update", "machinesets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - obj := action.(core.UpdateAction).GetObject().(*v1alpha1.MachineSet) - return true, obj, nil - }) - return fakeClient -} - -func addUpdateMachinesReactor(fakeClient *fake.Clientset) *fake.Clientset { - fakeClient.AddReactor("update", "machines", func(action core.Action) (handled bool, ret runtime.Object, err error) { - obj := action.(core.UpdateAction).GetObject().(*v1alpha1.Machine) - return true, obj, nil - }) - return fakeClient -} - -func generateMSWithLabel(labels map[string]string, image string) v1alpha1.MachineSet { - return v1alpha1.MachineSet{ +func generateMSWithLabel(labels map[string]string, image string) v1alpha2.MachineSet { + return v1alpha2.MachineSet{ 
ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.GenerateName("machineset"), Labels: labels, }, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Replicas: func(i int32) *int32 { return &i }(1), Selector: metav1.LabelSelector{MatchLabels: labels}, - Template: v1alpha1.MachineTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ + Template: v1alpha2.MachineTemplateSpec{ + ObjectMeta: v1alpha2.ObjectMeta{ Labels: labels, }, - Spec: v1alpha1.MachineSpec{}, + Spec: v1alpha2.MachineSpec{}, }, }, } } -func newDControllerRef(d *v1alpha1.MachineDeployment) *metav1.OwnerReference { +func newDControllerRef(d *v1alpha2.MachineDeployment) *metav1.OwnerReference { isController := true return &metav1.OwnerReference{ APIVersion: "clusters/v1alpha", @@ -115,16 +63,16 @@ func newDControllerRef(d *v1alpha1.MachineDeployment) *metav1.OwnerReference { } // generateMS creates a machine set, with the input deployment's template as its template -func generateMS(deployment v1alpha1.MachineDeployment) v1alpha1.MachineSet { +func generateMS(deployment v1alpha2.MachineDeployment) v1alpha2.MachineSet { template := deployment.Spec.Template.DeepCopy() - return v1alpha1.MachineSet{ + return v1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{ UID: randomUID(), Name: names.SimpleNameGenerator.GenerateName("machineset"), Labels: template.Labels, OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)}, }, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Replicas: new(int32), Template: *template, Selector: metav1.LabelSelector{MatchLabels: template.Labels}, @@ -137,41 +85,41 @@ func randomUID() types.UID { } // generateDeployment creates a deployment, with the input image as its template -func generateDeployment(image string) v1alpha1.MachineDeployment { +func generateDeployment(image string) v1alpha2.MachineDeployment { machineLabels := map[string]string{"name": image} - return v1alpha1.MachineDeployment{ + return v1alpha2.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: image, Annotations: make(map[string]string), }, - Spec: v1alpha1.MachineDeploymentSpec{ + Spec: v1alpha2.MachineDeploymentSpec{ Replicas: func(i int32) *int32 { return &i }(1), Selector: metav1.LabelSelector{MatchLabels: machineLabels}, - Template: v1alpha1.MachineTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ + Template: v1alpha2.MachineTemplateSpec{ + ObjectMeta: v1alpha2.ObjectMeta{ Labels: machineLabels, }, - Spec: v1alpha1.MachineSpec{}, + Spec: v1alpha2.MachineSpec{}, }, }, } } -func generateMachineTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1alpha1.MachineTemplateSpec { - return v1alpha1.MachineTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ +func generateMachineTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1alpha2.MachineTemplateSpec { + return v1alpha2.MachineTemplateSpec{ + ObjectMeta: v1alpha2.ObjectMeta{ Name: name, Annotations: annotations, Labels: labels, }, - Spec: v1alpha1.MachineSpec{}, + Spec: v1alpha2.MachineSpec{}, } } func TestEqualIgnoreHash(t *testing.T) { tests := []struct { Name string - former, latter v1alpha1.MachineTemplateSpec + former, latter v1alpha2.MachineTemplateSpec expected bool }{ { @@ -238,7 +186,7 @@ func TestEqualIgnoreHash(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - runTest := func(t1, t2 *v1alpha1.MachineTemplateSpec, reversed bool) { + runTest := func(t1, t2 *v1alpha2.MachineTemplateSpec, reversed bool) { reverseString := "" if reversed { 
reverseString = " (reverse order)" @@ -281,26 +229,26 @@ func TestFindNewMachineSet(t *testing.T) { tests := []struct { Name string - deployment v1alpha1.MachineDeployment - msList []*v1alpha1.MachineSet - expected *v1alpha1.MachineSet + deployment v1alpha2.MachineDeployment + msList []*v1alpha2.MachineSet + expected *v1alpha2.MachineSet }{ { Name: "Get new MachineSet with the same template as Deployment spec but different machine-template-hash value", deployment: deployment, - msList: []*v1alpha1.MachineSet{&newMS, &oldMS}, + msList: []*v1alpha2.MachineSet{&newMS, &oldMS}, expected: &newMS, }, { Name: "Get the oldest new MachineSet when there are more than one MachineSet with the same template", deployment: deployment, - msList: []*v1alpha1.MachineSet{&newMS, &oldMS, &newMSDup}, + msList: []*v1alpha2.MachineSet{&newMS, &oldMS, &newMSDup}, expected: &newMSDup, }, { Name: "Get nil new MachineSet", deployment: deployment, - msList: []*v1alpha1.MachineSet{&oldMS}, + msList: []*v1alpha2.MachineSet{&oldMS}, expected: nil, }, } @@ -337,38 +285,38 @@ func TestFindOldMachineSets(t *testing.T) { tests := []struct { Name string - deployment v1alpha1.MachineDeployment - msList []*v1alpha1.MachineSet - machineList *v1alpha1.MachineList - expected []*v1alpha1.MachineSet - expectedRequire []*v1alpha1.MachineSet + deployment v1alpha2.MachineDeployment + msList []*v1alpha2.MachineSet + machineList *v1alpha2.MachineList + expected []*v1alpha2.MachineSet + expectedRequire []*v1alpha2.MachineSet }{ { Name: "Get old MachineSets", deployment: deployment, - msList: []*v1alpha1.MachineSet{&newMS, &oldMS}, - expected: []*v1alpha1.MachineSet{&oldMS}, + msList: []*v1alpha2.MachineSet{&newMS, &oldMS}, + expected: []*v1alpha2.MachineSet{&oldMS}, expectedRequire: nil, }, { Name: "Get old MachineSets with no new MachineSet", deployment: deployment, - msList: []*v1alpha1.MachineSet{&oldMS}, - expected: []*v1alpha1.MachineSet{&oldMS}, + msList: []*v1alpha2.MachineSet{&oldMS}, + expected: []*v1alpha2.MachineSet{&oldMS}, expectedRequire: nil, }, { Name: "Get old MachineSets with two new MachineSets, only the oldest new MachineSet is seen as new MachineSet", deployment: deployment, - msList: []*v1alpha1.MachineSet{&oldMS, &newMS, &newMSDup}, - expected: []*v1alpha1.MachineSet{&oldMS, &newMS}, - expectedRequire: []*v1alpha1.MachineSet{&newMS}, + msList: []*v1alpha2.MachineSet{&oldMS, &newMS, &newMSDup}, + expected: []*v1alpha2.MachineSet{&oldMS, &newMS}, + expectedRequire: []*v1alpha2.MachineSet{&newMS}, }, { Name: "Get empty old MachineSets", deployment: deployment, - msList: []*v1alpha1.MachineSet{&newMS}, - expected: []*v1alpha1.MachineSet{}, + msList: []*v1alpha2.MachineSet{&newMS}, + expected: []*v1alpha2.MachineSet{}, expectedRequire: nil, }, } @@ -390,7 +338,7 @@ func TestFindOldMachineSets(t *testing.T) { } // equal compares the equality of two MachineSet slices regardless of their ordering -func equal(mss1, mss2 []*v1alpha1.MachineSet) bool { +func equal(mss1, mss2 []*v1alpha2.MachineSet) bool { if reflect.DeepEqual(mss1, mss2) { return true } @@ -419,19 +367,19 @@ func TestGetReplicaCountForMachineSets(t *testing.T) { tests := []struct { Name string - sets []*v1alpha1.MachineSet + sets []*v1alpha2.MachineSet expectedCount int32 expectedActual int32 }{ { "1:2 Replicas", - []*v1alpha1.MachineSet{&ms1}, + []*v1alpha2.MachineSet{&ms1}, 1, 2, }, { "3:5 Replicas", - []*v1alpha1.MachineSet{&ms1, &ms2}, + []*v1alpha2.MachineSet{&ms1, &ms2}, 3, 5, }, @@ -515,7 +463,7 @@ func TestResolveFenceposts(t *testing.T) { func 
TestNewMSNewReplicas(t *testing.T) { tests := []struct { Name string - strategyType common.MachineDeploymentStrategyType + strategyType v1alpha2.MachineDeploymentStrategyType depReplicas int32 newMSReplicas int32 maxSurge int @@ -523,12 +471,12 @@ func TestNewMSNewReplicas(t *testing.T) { }{ { "can not scale up - to newMSReplicas", - common.RollingUpdateMachineDeploymentStrategyType, + v1alpha2.RollingUpdateMachineDeploymentStrategyType, 1, 5, 1, 5, }, { "scale up - to depReplicas", - common.RollingUpdateMachineDeploymentStrategyType, + v1alpha2.RollingUpdateMachineDeploymentStrategyType, 6, 2, 10, 6, }, } @@ -540,8 +488,8 @@ func TestNewMSNewReplicas(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { *(newDeployment.Spec.Replicas) = test.depReplicas - newDeployment.Spec.Strategy = &v1alpha1.MachineDeploymentStrategy{Type: test.strategyType} - newDeployment.Spec.Strategy.RollingUpdate = &v1alpha1.MachineRollingUpdateDeployment{ + newDeployment.Spec.Strategy = &v1alpha2.MachineDeploymentStrategy{Type: test.strategyType} + newDeployment.Spec.Strategy.RollingUpdate = &v1alpha2.MachineRollingUpdateDeployment{ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i) return &x @@ -552,7 +500,7 @@ func TestNewMSNewReplicas(t *testing.T) { }(test.maxSurge), } *(newRC.Spec.Replicas) = test.newMSReplicas - ms, err := NewMSNewReplicas(&newDeployment, []*v1alpha1.MachineSet{&rs5}, &newRC) + ms, err := NewMSNewReplicas(&newDeployment, []*v1alpha2.MachineSet{&rs5}, &newRC) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -564,19 +512,19 @@ func TestNewMSNewReplicas(t *testing.T) { } func TestDeploymentComplete(t *testing.T) { - deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *v1alpha1.MachineDeployment { - return &v1alpha1.MachineDeployment{ - Spec: v1alpha1.MachineDeploymentSpec{ + deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *v1alpha2.MachineDeployment { + return &v1alpha2.MachineDeployment{ + Spec: v1alpha2.MachineDeploymentSpec{ Replicas: &desired, - Strategy: &v1alpha1.MachineDeploymentStrategy{ - RollingUpdate: &v1alpha1.MachineRollingUpdateDeployment{ + Strategy: &v1alpha2.MachineDeploymentStrategy{ + RollingUpdate: &v1alpha2.MachineRollingUpdateDeployment{ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)), MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)), }, - Type: common.RollingUpdateMachineDeploymentStrategyType, + Type: v1alpha2.RollingUpdateMachineDeploymentStrategyType, }, }, - Status: v1alpha1.MachineDeploymentStatus{ + Status: v1alpha2.MachineDeploymentStatus{ Replicas: current, UpdatedReplicas: updated, AvailableReplicas: available, @@ -587,7 +535,7 @@ func TestDeploymentComplete(t *testing.T) { tests := []struct { name string - d *v1alpha1.MachineDeployment + d *v1alpha2.MachineDeployment expected bool }{ @@ -641,23 +589,23 @@ func TestDeploymentComplete(t *testing.T) { } func TestMaxUnavailable(t *testing.T) { - deployment := func(replicas int32, maxUnavailable intstr.IntOrString) v1alpha1.MachineDeployment { - return v1alpha1.MachineDeployment{ - Spec: v1alpha1.MachineDeploymentSpec{ + deployment := func(replicas int32, maxUnavailable intstr.IntOrString) v1alpha2.MachineDeployment { + return v1alpha2.MachineDeployment{ + Spec: v1alpha2.MachineDeploymentSpec{ Replicas: func(i int32) *int32 { return &i 
}(replicas), - Strategy: &v1alpha1.MachineDeploymentStrategy{ - RollingUpdate: &v1alpha1.MachineRollingUpdateDeployment{ + Strategy: &v1alpha2.MachineDeploymentStrategy{ + RollingUpdate: &v1alpha2.MachineRollingUpdateDeployment{ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)), MaxUnavailable: &maxUnavailable, }, - Type: common.RollingUpdateMachineDeploymentStrategyType, + Type: v1alpha2.RollingUpdateMachineDeploymentStrategyType, }, }, } } tests := []struct { name string - deployment v1alpha1.MachineDeployment + deployment v1alpha2.MachineDeployment expected int32 }{ { @@ -773,14 +721,14 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { tests := []struct { name string - machineSet *v1alpha1.MachineSet + machineSet *v1alpha2.MachineSet expected bool }{ { name: "test Annotations nil", - machineSet: &v1alpha1.MachineSet{ + machineSet: &v1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -788,13 +736,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test desiredReplicas update", - machineSet: &v1alpha1.MachineSet{ + machineSet: &v1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: "8", MaxReplicasAnnotation: maxReplicas}, }, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -802,13 +750,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test maxReplicas update", - machineSet: &v1alpha1.MachineSet{ + machineSet: &v1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: "16"}, }, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -816,13 +764,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test needn't update", - machineSet: &v1alpha1.MachineSet{ + machineSet: &v1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: maxReplicas}, }, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/BUILD.bazel index b7b90d990a..95979d8caa 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/BUILD.bazel @@ -11,7 +11,8 @@ go_library( importpath = "sigs.k8s.io/cluster-api/pkg/controller/machineset", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/controller/external:go_default_library", "//pkg/controller/noderefutil:go_default_library", "//pkg/controller/remote:go_default_library", "//pkg/util:go_default_library", @@ -19,6 +20,7 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", @@ -43,12 +45,15 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/common:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", + "//api/v1alpha2:go_default_library", + "//pkg/controller/external:go_default_library", + "//pkg/errors:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy.go index 7515be6880..3cff59438b 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy.go @@ -22,12 +22,12 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" ) type ( deletePriority float64 - deletePriorityFunc func(machine *v1alpha1.Machine) deletePriority + deletePriorityFunc func(machine *v1alpha2.Machine) deletePriority ) const ( @@ -45,7 +45,7 @@ const ( ) // maps the creation timestamp onto the 0-100 priority range -func oldestDeletePriority(machine *v1alpha1.Machine) deletePriority { +func oldestDeletePriority(machine *v1alpha2.Machine) deletePriority { if machine.DeletionTimestamp != nil && !machine.DeletionTimestamp.IsZero() { return mustDelete } @@ -65,7 +65,7 @@ func oldestDeletePriority(machine *v1alpha1.Machine) deletePriority { return deletePriority(float64(mustDelete) * (1.0 - math.Exp(-d.Seconds()/secondsPerTenDays))) } -func newestDeletePriority(machine *v1alpha1.Machine) deletePriority { +func newestDeletePriority(machine *v1alpha2.Machine) deletePriority { if machine.DeletionTimestamp != nil && !machine.DeletionTimestamp.IsZero() { return mustDelete } @@ -78,7 +78,7 @@ func newestDeletePriority(machine *v1alpha1.Machine) deletePriority { return mustDelete - oldestDeletePriority(machine) } -func randomDeletePolicy(machine *v1alpha1.Machine) deletePriority { +func randomDeletePolicy(machine *v1alpha2.Machine) deletePriority { if machine.DeletionTimestamp != nil && !machine.DeletionTimestamp.IsZero() { return mustDelete } @@ -92,7 +92,7 @@ func randomDeletePolicy(machine *v1alpha1.Machine) deletePriority { } type sortableMachines struct { - machines []*v1alpha1.Machine + machines []*v1alpha2.Machine priority deletePriorityFunc } @@ -102,11 
+102,11 @@ func (m sortableMachines) Less(i, j int) bool { return m.priority(m.machines[j]) < m.priority(m.machines[i]) // high to low } -func getMachinesToDeletePrioritized(filteredMachines []*v1alpha1.Machine, diff int, fun deletePriorityFunc) []*v1alpha1.Machine { +func getMachinesToDeletePrioritized(filteredMachines []*v1alpha2.Machine, diff int, fun deletePriorityFunc) []*v1alpha2.Machine { if diff >= len(filteredMachines) { return filteredMachines } else if diff <= 0 { - return []*v1alpha1.Machine{} + return []*v1alpha2.Machine{} } sortable := sortableMachines{ @@ -118,14 +118,14 @@ func getMachinesToDeletePrioritized(filteredMachines []*v1alpha1.Machine, diff i return sortable.machines[:diff] } -func getDeletePriorityFunc(ms *v1alpha1.MachineSet) (deletePriorityFunc, error) { +func getDeletePriorityFunc(ms *v1alpha2.MachineSet) (deletePriorityFunc, error) { // Map the Spec.DeletePolicy value to the appropriate delete priority function - switch msdp := v1alpha1.MachineSetDeletePolicy(ms.Spec.DeletePolicy); msdp { - case v1alpha1.RandomMachineSetDeletePolicy: + switch msdp := v1alpha2.MachineSetDeletePolicy(ms.Spec.DeletePolicy); msdp { + case v1alpha2.RandomMachineSetDeletePolicy: return randomDeletePolicy, nil - case v1alpha1.NewestMachineSetDeletePolicy: + case v1alpha2.NewestMachineSetDeletePolicy: return newestDeletePriority, nil - case v1alpha1.OldestMachineSetDeletePolicy: + case v1alpha2.OldestMachineSetDeletePolicy: return oldestDeletePriority, nil case "": return randomDeletePolicy, nil diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy_test.go index 28149e4680..cc0f6209ee 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/delete_policy_test.go @@ -21,50 +21,50 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" + capierrors "sigs.k8s.io/cluster-api/pkg/errors" ) func TestMachineToDelete(t *testing.T) { msg := "something wrong with the machine" now := metav1.Now() - mustDeleteMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &now}} - betterDeleteMachine := &v1alpha1.Machine{Status: v1alpha1.MachineStatus{ErrorMessage: &msg}} - deleteMeMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{DeleteNodeAnnotation: "yes"}}} + mustDeleteMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &now}} + betterDeleteMachine := &v1alpha2.Machine{Status: v1alpha2.MachineStatus{ErrorMessage: &msg}} + deleteMeMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{DeleteNodeAnnotation: "yes"}}} tests := []struct { desc string - machines []*v1alpha1.Machine + machines []*v1alpha2.Machine diff int - expect []*v1alpha1.Machine + expect []*v1alpha2.Machine }{ { desc: "func=randomDeletePolicy, diff=0", diff: 0, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ {}, }, - expect: []*v1alpha1.Machine{}, + expect: []*v1alpha2.Machine{}, }, { desc: "func=randomDeletePolicy, diff>len(machines)", diff: 2, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ {}, }, - expect: []*v1alpha1.Machine{ + expect: []*v1alpha2.Machine{ {}, }, }, { desc: "func=randomDeletePolicy, 
diff>betterDelete", diff: 2, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ {}, betterDeleteMachine, {}, }, - expect: []*v1alpha1.Machine{ + expect: []*v1alpha2.Machine{ betterDeleteMachine, {}, }, @@ -72,13 +72,13 @@ func TestMachineToDelete(t *testing.T) { { desc: "func=randomDeletePolicy, diffbetterDelete", diff: 2, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ {}, betterDeleteMachine, {}, }, - expect: []*v1alpha1.Machine{ + expect: []*v1alpha2.Machine{ betterDeleteMachine, {}, }, @@ -140,12 +140,12 @@ func TestMachineToDelete(t *testing.T) { { desc: "func=randomDeletePolicy, annotated, diff=1", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ {}, deleteMeMachine, {}, }, - expect: []*v1alpha1.Machine{ + expect: []*v1alpha2.Machine{ deleteMeMachine, }, }} @@ -161,60 +161,60 @@ func TestMachineToDelete(t *testing.T) { func TestMachineNewestDelete(t *testing.T) { currentTime := metav1.Now() - statusError := common.MachineStatusError("I'm unhealthy!") - mustDeleteMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: ¤tTime}} - newest := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}} - new := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -5))}} - old := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} - oldest := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} - annotatedMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{DeleteNodeAnnotation: "yes"}, CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} - unhealthyMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: v1alpha1.MachineStatus{ErrorReason: &statusError}} + statusError := capierrors.MachineStatusError("I'm unhealthy!") + mustDeleteMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: ¤tTime}} + newest := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}} + new := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -5))}} + old := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} + oldest := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} + annotatedMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{DeleteNodeAnnotation: "yes"}, CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} + unhealthyMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: v1alpha2.MachineStatus{ErrorReason: &statusError}} tests := []struct { desc string - machines []*v1alpha1.Machine + machines []*v1alpha2.Machine diff int - expect []*v1alpha1.Machine + expect []*v1alpha2.Machine }{ { desc: "func=newestDeletePriority, diff=1", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, old, mustDeleteMachine, newest, }, - expect: []*v1alpha1.Machine{mustDeleteMachine}, + expect: 
[]*v1alpha2.Machine{mustDeleteMachine}, }, { desc: "func=newestDeletePriority, diff=2", diff: 2, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, mustDeleteMachine, old, newest, }, - expect: []*v1alpha1.Machine{mustDeleteMachine, newest}, + expect: []*v1alpha2.Machine{mustDeleteMachine, newest}, }, { desc: "func=newestDeletePriority, diff=3", diff: 3, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, mustDeleteMachine, oldest, old, newest, }, - expect: []*v1alpha1.Machine{mustDeleteMachine, newest, new}, + expect: []*v1alpha2.Machine{mustDeleteMachine, newest, new}, }, { desc: "func=newestDeletePriority, diff=1 (annotated)", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, old, newest, annotatedMachine, }, - expect: []*v1alpha1.Machine{annotatedMachine}, + expect: []*v1alpha2.Machine{annotatedMachine}, }, { desc: "func=newestDeletePriority, diff=1 (unhealthy)", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, old, newest, unhealthyMachine, }, - expect: []*v1alpha1.Machine{unhealthyMachine}, + expect: []*v1alpha2.Machine{unhealthyMachine}, }, } @@ -229,68 +229,68 @@ func TestMachineNewestDelete(t *testing.T) { func TestMachineOldestDelete(t *testing.T) { currentTime := metav1.Now() - statusError := common.MachineStatusError("I'm unhealthy!") - empty := &v1alpha1.Machine{} - newest := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}} - new := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -5))}} - old := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} - oldest := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} - annotatedMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{DeleteNodeAnnotation: "yes"}, CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} - unhealthyMachine := &v1alpha1.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: v1alpha1.MachineStatus{ErrorReason: &statusError}} + statusError := capierrors.MachineStatusError("I'm unhealthy!") + empty := &v1alpha2.Machine{} + newest := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -1))}} + new := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -5))}} + old := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} + oldest := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} + annotatedMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{DeleteNodeAnnotation: "yes"}, CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}} + unhealthyMachine := &v1alpha2.Machine{ObjectMeta: metav1.ObjectMeta{CreationTimestamp: metav1.NewTime(currentTime.Time.AddDate(0, 0, -10))}, Status: v1alpha2.MachineStatus{ErrorReason: &statusError}} tests := []struct { desc string - machines []*v1alpha1.Machine + machines []*v1alpha2.Machine diff int - expect []*v1alpha1.Machine + expect []*v1alpha2.Machine }{ { desc: 
"func=oldestDeletePriority, diff=1", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ empty, new, oldest, old, newest, }, - expect: []*v1alpha1.Machine{oldest}, + expect: []*v1alpha2.Machine{oldest}, }, { desc: "func=oldestDeletePriority, diff=2", diff: 2, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, old, newest, empty, }, - expect: []*v1alpha1.Machine{oldest, old}, + expect: []*v1alpha2.Machine{oldest, old}, }, { desc: "func=oldestDeletePriority, diff=3", diff: 3, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, old, newest, empty, }, - expect: []*v1alpha1.Machine{oldest, old, new}, + expect: []*v1alpha2.Machine{oldest, old, new}, }, { desc: "func=oldestDeletePriority, diff=4", diff: 4, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ new, oldest, old, newest, empty, }, - expect: []*v1alpha1.Machine{oldest, old, new, newest}, + expect: []*v1alpha2.Machine{oldest, old, new, newest}, }, { desc: "func=oldestDeletePriority, diff=1 (annotated)", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ empty, new, oldest, old, newest, annotatedMachine, }, - expect: []*v1alpha1.Machine{annotatedMachine}, + expect: []*v1alpha2.Machine{annotatedMachine}, }, { desc: "func=oldestDeletePriority, diff=1 (unhealthy)", diff: 1, - machines: []*v1alpha1.Machine{ + machines: []*v1alpha2.Machine{ empty, new, oldest, old, newest, unhealthyMachine, }, - expect: []*v1alpha1.Machine{unhealthyMachine}, + expect: []*v1alpha2.Machine{unhealthyMachine}, }, } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine.go index c421da741f..7ab6089d25 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine.go @@ -22,28 +22,24 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" ) -func (c *ReconcileMachineSet) getMachineSetsForMachine(m *v1alpha1.Machine) []*v1alpha1.MachineSet { +func (c *ReconcileMachineSet) getMachineSetsForMachine(m *v1alpha2.Machine) []*v1alpha2.MachineSet { if len(m.Labels) == 0 { klog.Warningf("No machine sets found for Machine %v because it has no labels", m.Name) return nil } - msList := &v1alpha1.MachineSetList{} - listOptions := &client.ListOptions{ - Namespace: m.Namespace, - } - - err := c.Client.List(context.Background(), listOptions, msList) + msList := &v1alpha2.MachineSetList{} + err := c.Client.List(context.Background(), msList, client.InNamespace(m.Namespace)) if err != nil { klog.Errorf("Failed to list machine sets, %v", err) return nil } - var mss []*v1alpha1.MachineSet + var mss []*v1alpha2.MachineSet for idx := range msList.Items { ms := &msList.Items[idx] if hasMatchingLabels(ms, m) { @@ -54,7 +50,7 @@ func (c *ReconcileMachineSet) getMachineSetsForMachine(m *v1alpha1.Machine) []*v return mss } -func hasMatchingLabels(machineSet *v1alpha1.MachineSet, machine *v1alpha1.Machine) bool { +func hasMatchingLabels(machineSet *v1alpha2.MachineSet, machine *v1alpha2.Machine) bool { selector, err := metav1.LabelSelectorAsSelector(&machineSet.Spec.Selector) if err != nil { klog.Warningf("unable to convert selector: %v", err) diff --git 
a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine_test.go index 7e2f9190da..de96cd429b 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machine_test.go @@ -20,18 +20,18 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" ) func TestHasMatchingLabels(t *testing.T) { testCases := []struct { - machineSet v1alpha1.MachineSet - machine v1alpha1.Machine + machineSet v1alpha2.MachineSet + machine v1alpha2.Machine expected bool }{ { - machineSet: v1alpha1.MachineSet{ - Spec: v1alpha1.MachineSetSpec{ + machineSet: v1alpha2.MachineSet{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -39,7 +39,7 @@ func TestHasMatchingLabels(t *testing.T) { }, }, }, - machine: v1alpha1.Machine{ + machine: v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "matchSelector", Labels: map[string]string{ @@ -50,8 +50,8 @@ func TestHasMatchingLabels(t *testing.T) { expected: true, }, { - machineSet: v1alpha1.MachineSet{ - Spec: v1alpha1.MachineSetSpec{ + machineSet: v1alpha2.MachineSet{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -59,7 +59,7 @@ func TestHasMatchingLabels(t *testing.T) { }, }, }, - machine: v1alpha1.Machine{ + machine: v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "doesNotMatchSelector", Labels: map[string]string{ diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go index d0e82134e2..1958e68de8 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -31,8 +33,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" "sigs.k8s.io/cluster-api/pkg/util" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -47,7 +49,7 @@ const controllerName = "machineset-controller" var ( // controllerKind contains the schema.GroupVersionKind for this controller type. - controllerKind = clusterv1alpha1.SchemeGroupVersion.WithKind("MachineSet") + controllerKind = clusterv1.GroupVersion.WithKind("MachineSet") // stateConfirmationTimeout is the amount of time allowed to wait for desired state. stateConfirmationTimeout = 10 * time.Second @@ -66,7 +68,11 @@ func Add(mgr manager.Manager) error { // newReconciler returns a new reconcile.Reconciler. 
func newReconciler(mgr manager.Manager) *ReconcileMachineSet { - return &ReconcileMachineSet{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetRecorder(controllerName)} + return &ReconcileMachineSet{ + Client: mgr.GetClient(), + scheme: mgr.GetScheme(), + recorder: mgr.GetEventRecorderFor(controllerName), + } } // add adds a new Controller to mgr with r as the reconcile.Reconciler. @@ -79,7 +85,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // Watch for changes to MachineSet. err = c.Watch( - &source.Kind{Type: &clusterv1alpha1.MachineSet{}}, + &source.Kind{Type: &clusterv1.MachineSet{}}, &handler.EnqueueRequestForObject{}, ) if err != nil { @@ -88,8 +94,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // Watch for changes to Machines and reconcile the owner MachineSet. err = c.Watch( - &source.Kind{Type: &clusterv1alpha1.Machine{}}, - &handler.EnqueueRequestForOwner{IsController: true, OwnerType: &clusterv1alpha1.MachineSet{}}, + &source.Kind{Type: &clusterv1.Machine{}}, + &handler.EnqueueRequestForOwner{IsController: true, OwnerType: &clusterv1.MachineSet{}}, ) if err != nil { return err @@ -99,7 +105,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFu // This watcher is required for use cases like adoption. In case a Machine doesn't have // a controller reference, it'll look for potential matching MachineSet to reconcile. return c.Watch( - &source.Kind{Type: &clusterv1alpha1.Machine{}}, + &source.Kind{Type: &clusterv1.Machine{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: mapFn}, ) } @@ -114,12 +120,10 @@ type ReconcileMachineSet struct { // Reconcile reads that state of the cluster for a MachineSet object and makes changes based on the state read // and what is in the MachineSet.Spec // Automatically generate RBAC rules to allow the Controller to read and write Deployments -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machinesets;machinesets/status,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machines,verbs=get;list;watch;create;update;patch;delete func (r *ReconcileMachineSet) Reconcile(request reconcile.Request) (reconcile.Result, error) { // Fetch the MachineSet instance ctx := context.TODO() - machineSet := &clusterv1alpha1.MachineSet{} + machineSet := &clusterv1.MachineSet{} if err := r.Get(ctx, request.NamespacedName, machineSet); err != nil { if apierrors.IsNotFound(err) { // Object not found, return. Created objects are automatically garbage collected. @@ -144,13 +148,8 @@ func (r *ReconcileMachineSet) Reconcile(request reconcile.Request) (reconcile.Re return result, err } -func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *clusterv1alpha1.MachineSet) (reconcile.Result, error) { - klog.V(4).Infof("Reconcile machineset %v", machineSet.Name) - allMachines := &clusterv1alpha1.MachineList{} - - if err := r.Client.List(context.Background(), client.InNamespace(machineSet.Namespace), allMachines); err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to list machines") - } +func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *clusterv1.MachineSet) (reconcile.Result, error) { + klog.V(4).Infof("Reconcile MachineSet %q in namespace %q", machineSet.Name, machineSet.Namespace) // Make sure that label selector can match template's labels. // TODO(vincepri): Move to a validation (admission) webhook when supported. 
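Taken together, the constructor and watch changes above reduce to the wiring sketched below. This is a condensed restatement for readability, not part of the patch; the function name and parameters are illustrative, while the types and calls are the ones used in the hunk (controller-runtime v0.2-style APIs).

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// wireMachineSetController registers the three watches the patched add() sets up.
func wireMachineSetController(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.ToRequestsFunc) error {
	c, err := controller.New("machineset-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Reconcile MachineSets directly.
	if err := c.Watch(&source.Kind{Type: &clusterv1.MachineSet{}}, &handler.EnqueueRequestForObject{}); err != nil {
		return err
	}
	// Reconcile the owning MachineSet whenever one of its Machines changes.
	if err := c.Watch(
		&source.Kind{Type: &clusterv1.Machine{}},
		&handler.EnqueueRequestForOwner{IsController: true, OwnerType: &clusterv1.MachineSet{}},
	); err != nil {
		return err
	}
	// Map Machines without a controller reference to candidate MachineSets (adoption).
	return c.Watch(&source.Kind{Type: &clusterv1.Machine{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: mapFn})
}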
@@ -163,44 +162,41 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster return reconcile.Result{}, errors.Errorf("failed validation on MachineSet %q label selector, cannot match any machines ", machineSet.Name) } + selectorMap, err := metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to convert MachineSet %q label selector to a map", machineSet.Name) + } + + allMachines := &clusterv1.MachineList{} + err = r.Client.List( + context.Background(), allMachines, + client.InNamespace(machineSet.Namespace), + client.MatchingLabels(selectorMap), + ) + if err != nil { + return reconcile.Result{}, errors.Wrap(err, "failed to list machines") + } + // Cluster might be nil as some providers might not require a cluster object // for machine management. - cluster, err := r.getCluster(machineSet) - if err != nil { + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machineSet.ObjectMeta) + if errors.Cause(err) == util.ErrNoCluster { + klog.Infof("MachineSet %q in namespace %q doesn't specify %q label, assuming nil cluster", machineSet.Name, machineSet.Namespace, clusterv1.MachineClusterLabelName) + } else if err != nil { return reconcile.Result{}, err } - // Set the ownerRef with foreground deletion if there is a linked cluster. if cluster != nil && shouldAdopt(machineSet) { - klog.Infof("Cluster %s/%s is adopting MachineSet %s", cluster.Namespace, cluster.Name, machineSet.Name) - blockOwnerDeletion := true - machineSet.OwnerReferences = append(machineSet.OwnerReferences, metav1.OwnerReference{ - APIVersion: cluster.APIVersion, - Kind: cluster.Kind, - Name: cluster.Name, - UID: cluster.UID, - BlockOwnerDeletion: &blockOwnerDeletion, + machineSet.OwnerReferences = util.EnsureOwnerRef(machineSet.OwnerReferences, metav1.OwnerReference{ + APIVersion: cluster.APIVersion, + Kind: cluster.Kind, + Name: cluster.Name, + UID: cluster.UID, }) } - // Add foregroundDeletion finalizer if MachineSet isn't deleted and linked to a cluster. - if cluster != nil && - machineSet.ObjectMeta.DeletionTimestamp.IsZero() && - !util.Contains(machineSet.Finalizers, metav1.FinalizerDeleteDependents) { - - machineSet.Finalizers = append(machineSet.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) - - if err := r.Client.Update(context.Background(), machineSet); err != nil { - klog.Infof("Failed to add finalizers to MachineSet %q: %v", machineSet.Name, err) - return reconcile.Result{}, err - } - - // Since adding the finalizer updates the object return to avoid later update issues - return reconcile.Result{Requeue: true}, nil - } - // Filter out irrelevant machines (deleting/mismatch labels) and claim orphaned machines. - filteredMachines := make([]*clusterv1alpha1.Machine, 0, len(allMachines.Items)) + filteredMachines := make([]*clusterv1.Machine, 0, len(allMachines.Items)) for idx := range allMachines.Items { machine := &allMachines.Items[idx] if shouldExcludeMachine(machineSet, machine) { @@ -268,16 +264,16 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster } // getCluster reuturns the Cluster associated with the MachineSet, if any. 
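The reconcile changes above replace the old *client.ListOptions argument with variadic list options and push label filtering into the List call itself, so the controller only ever sees Machines matching the MachineSet selector. A minimal standalone sketch of that pattern; the function name is illustrative, the helpers are the ones used in the hunk.

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// machinesForSet lists the Machines in the MachineSet's namespace that match its selector.
func machinesForSet(ctx context.Context, c client.Client, ms *clusterv1.MachineSet) (*clusterv1.MachineList, error) {
	selectorMap, err := metav1.LabelSelectorAsMap(&ms.Spec.Selector)
	if err != nil {
		return nil, err
	}
	machines := &clusterv1.MachineList{}
	err = c.List(ctx, machines,
		client.InNamespace(ms.Namespace),
		client.MatchingLabels(selectorMap),
	)
	if err != nil {
		return nil, err
	}
	return machines, nil
}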
-func (r *ReconcileMachineSet) getCluster(ms *clusterv1alpha1.MachineSet) (*clusterv1alpha1.Cluster, error) { - if ms.Spec.Template.Labels[clusterv1alpha1.MachineClusterLabelName] == "" { - klog.Infof("MachineSet %q in namespace %q doesn't specify %q label, assuming nil cluster", ms.Name, ms.Namespace, clusterv1alpha1.MachineClusterLabelName) +func (r *ReconcileMachineSet) getCluster(ms *clusterv1.MachineSet) (*clusterv1.Cluster, error) { + if ms.Spec.Template.Labels[clusterv1.MachineClusterLabelName] == "" { + klog.Infof("MachineSet %q in namespace %q doesn't specify %q label, assuming nil cluster", ms.Name, ms.Namespace, clusterv1.MachineClusterLabelName) return nil, nil } - cluster := &clusterv1alpha1.Cluster{} + cluster := &clusterv1.Cluster{} key := client.ObjectKey{ Namespace: ms.Namespace, - Name: ms.Spec.Template.Labels[clusterv1alpha1.MachineClusterLabelName], + Name: ms.Spec.Template.Labels[clusterv1.MachineClusterLabelName], } if err := r.Client.Get(context.Background(), key, cluster); err != nil { @@ -288,7 +284,7 @@ func (r *ReconcileMachineSet) getCluster(ms *clusterv1alpha1.MachineSet) (*clust } // syncReplicas scales Machine resources up or down. -func (r *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machines []*clusterv1alpha1.Machine) error { +func (r *ReconcileMachineSet) syncReplicas(ms *clusterv1.MachineSet, machines []*clusterv1.Machine) error { if ms.Spec.Replicas == nil { return errors.Errorf("the Replicas field in Spec for machineset %v is nil, this should not be allowed", ms.Name) } @@ -300,17 +296,56 @@ func (r *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi klog.Infof("Too few replicas for %v %s/%s, need %d, creating %d", controllerKind, ms.Namespace, ms.Name, *(ms.Spec.Replicas), diff) - var machineList []*clusterv1alpha1.Machine + var machineList []*clusterv1.Machine var errstrings []string for i := 0; i < diff; i++ { klog.Infof("Creating machine %d of %d, ( spec.replicas(%d) > currentMachineCount(%d) )", i+1, diff, *(ms.Spec.Replicas), len(machines)) - machine := r.createMachine(ms) - if err := r.Client.Create(context.Background(), machine); err != nil { + machine := r.getNewMachine(ms) + + // Clone and set the infrastructure and bootstrap references. 
+ var ( + infraConfig, bootstrapConfig *unstructured.Unstructured + err error + ) + + infraConfig, err = external.CloneTemplate(r.Client, &machine.Spec.InfrastructureRef, machine.Namespace) + if err != nil { + return errors.Wrapf(err, "failed to clone infrastructure configuration for MachineSet %q in namespace %q", ms.Name, ms.Namespace) + } + machine.Spec.InfrastructureRef = corev1.ObjectReference{ + APIVersion: infraConfig.GetAPIVersion(), + Kind: infraConfig.GetKind(), + Namespace: infraConfig.GetNamespace(), + Name: infraConfig.GetName(), + } + + if machine.Spec.Bootstrap.ConfigRef != nil { + bootstrapConfig, err = external.CloneTemplate(r.Client, machine.Spec.Bootstrap.ConfigRef, machine.Namespace) + if err != nil { + return errors.Wrapf(err, "failed to clone bootstrap configuration for MachineSet %q in namespace %q", ms.Name, ms.Namespace) + } + machine.Spec.Bootstrap.ConfigRef = &corev1.ObjectReference{ + APIVersion: bootstrapConfig.GetAPIVersion(), + Kind: bootstrapConfig.GetKind(), + Namespace: bootstrapConfig.GetNamespace(), + Name: bootstrapConfig.GetName(), + } + } + + if err := r.Client.Create(context.TODO(), machine); err != nil { klog.Errorf("Unable to create Machine %q: %v", machine.Name, err) r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create machine %q: %v", machine.Name, err) errstrings = append(errstrings, err.Error()) + if err := r.Client.Delete(context.TODO(), infraConfig); !apierrors.IsNotFound(err) { + klog.Errorf("Failed to cleanup infrastructure configuration object after Machine creation error: %v", err) + } + if bootstrapConfig != nil { + if err := r.Client.Delete(context.TODO(), bootstrapConfig); !apierrors.IsNotFound(err) { + klog.Errorf("Failed to cleanup bootstrap configuration object after Machine creation error: %v", err) + } + } continue } klog.Infof("Created machine %d of %d with name %q", i+1, diff, machine.Name) @@ -341,7 +376,7 @@ func (r *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi var wg sync.WaitGroup wg.Add(diff) for _, machine := range machinesToDelete { - go func(targetMachine *clusterv1alpha1.Machine) { + go func(targetMachine *clusterv1.Machine) { defer wg.Done() err := r.Client.Delete(context.Background(), targetMachine) if err != nil { @@ -370,17 +405,20 @@ func (r *ReconcileMachineSet) syncReplicas(ms *clusterv1alpha1.MachineSet, machi return nil } -// createMachine creates a Machine resource. The name of the newly created resource is going +// getNewMachine creates a new Machine object. The name of the newly created resource is going // to be created by the API server, we set the generateName field. 
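The scale-up path above now stamps out a per-Machine infrastructure object (and, when set, a bootstrap object) from the referenced template before creating the Machine, and deletes those clones again if the Machine create fails. A minimal sketch of the infrastructure half, assuming external.CloneTemplate has the signature implied by the calls in the hunk; the helper name is illustrative.

import (
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/cluster-api/pkg/controller/external"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// cloneInfraFor clones the infrastructure template behind the Machine's reference
// and rewrites the reference to point at the freshly created object.
func cloneInfraFor(c client.Client, machine *clusterv1.Machine) error {
	infra, err := external.CloneTemplate(c, &machine.Spec.InfrastructureRef, machine.Namespace)
	if err != nil {
		return errors.Wrapf(err, "failed to clone infrastructure template for Machine %q", machine.Name)
	}
	machine.Spec.InfrastructureRef = corev1.ObjectReference{
		APIVersion: infra.GetAPIVersion(),
		Kind:       infra.GetKind(),
		Namespace:  infra.GetNamespace(),
		Name:       infra.GetName(),
	}
	return nil
}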
-func (r *ReconcileMachineSet) createMachine(machineSet *clusterv1alpha1.MachineSet) *clusterv1alpha1.Machine { - gv := clusterv1alpha1.SchemeGroupVersion - machine := &clusterv1alpha1.Machine{ +func (r *ReconcileMachineSet) getNewMachine(machineSet *clusterv1.MachineSet) *clusterv1.Machine { + gv := clusterv1.GroupVersion + machine := &clusterv1.Machine{ TypeMeta: metav1.TypeMeta{ Kind: gv.WithKind("Machine").Kind, APIVersion: gv.String(), }, - ObjectMeta: machineSet.Spec.Template.ObjectMeta, - Spec: machineSet.Spec.Template.Spec, + ObjectMeta: metav1.ObjectMeta{ + Labels: machineSet.Spec.Template.Labels, + Annotations: machineSet.Spec.Template.Annotations, + }, + Spec: machineSet.Spec.Template.Spec, } machine.ObjectMeta.GenerateName = fmt.Sprintf("%s-", machineSet.Name) machine.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(machineSet, controllerKind)} @@ -389,37 +427,27 @@ func (r *ReconcileMachineSet) createMachine(machineSet *clusterv1alpha1.MachineS } // shouldExcludeMachine returns true if the machine should be filtered out, false otherwise. -func shouldExcludeMachine(machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) bool { - // Ignore inactive machines. +func shouldExcludeMachine(machineSet *clusterv1.MachineSet, machine *clusterv1.Machine) bool { if metav1.GetControllerOf(machine) != nil && !metav1.IsControlledBy(machine, machineSet) { klog.V(4).Infof("%s not controlled by %v", machine.Name, machineSet.Name) return true } - - if machine.ObjectMeta.DeletionTimestamp != nil { - return true - } - - if !hasMatchingLabels(machineSet, machine) { - return true - } - - return false + return machine.ObjectMeta.DeletionTimestamp != nil } // adoptOrphan sets the MachineSet as a controller OwnerReference to the Machine. 
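With label filtering moved into the List call, shouldExcludeMachine above keeps only the ownership check: a Machine already controlled by a different owner (different UID) is skipped. A small sketch of the two metav1 helpers this relies on; the object names and UIDs are made-up test values.

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
)

func adoptionSketch() bool {
	ms := &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{Name: "ms-a", UID: "1"}}
	m := &clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "m-0"}}

	// Adoption appends a controller owner reference, as adoptOrphan does below.
	m.OwnerReferences = append(m.OwnerReferences,
		*metav1.NewControllerRef(ms, clusterv1.GroupVersion.WithKind("MachineSet")))

	// The exclusion check compares controller reference UIDs, so a Machine
	// controlled by another MachineSet is left alone even if the names match.
	return metav1.IsControlledBy(m, ms) // true for this pair
}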
-func (r *ReconcileMachineSet) adoptOrphan(machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) error { +func (r *ReconcileMachineSet) adoptOrphan(machineSet *clusterv1.MachineSet, machine *clusterv1.Machine) error { newRef := *metav1.NewControllerRef(machineSet, controllerKind) machine.OwnerReferences = append(machine.OwnerReferences, newRef) return r.Client.Update(context.Background(), machine) } -func (r *ReconcileMachineSet) waitForMachineCreation(machineList []*clusterv1alpha1.Machine) error { +func (r *ReconcileMachineSet) waitForMachineCreation(machineList []*clusterv1.Machine) error { for _, machine := range machineList { pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) { key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} - if err := r.Client.Get(context.Background(), key, &clusterv1alpha1.Machine{}); err != nil { + if err := r.Client.Get(context.Background(), key, &clusterv1.Machine{}); err != nil { if apierrors.IsNotFound(err) { return false, nil } @@ -439,10 +467,10 @@ func (r *ReconcileMachineSet) waitForMachineCreation(machineList []*clusterv1alp return nil } -func (r *ReconcileMachineSet) waitForMachineDeletion(machineList []*clusterv1alpha1.Machine) error { +func (r *ReconcileMachineSet) waitForMachineDeletion(machineList []*clusterv1.Machine) error { for _, machine := range machineList { pollErr := util.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) { - m := &clusterv1alpha1.Machine{} + m := &clusterv1.Machine{} key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} err := r.Client.Get(context.Background(), key, m) @@ -466,12 +494,9 @@ func (r *ReconcileMachineSet) waitForMachineDeletion(machineList []*clusterv1alp func (r *ReconcileMachineSet) MachineToMachineSets(o handler.MapObject) []reconcile.Request { result := []reconcile.Request{} - m := &clusterv1alpha1.Machine{} - key := client.ObjectKey{Namespace: o.Meta.GetNamespace(), Name: o.Meta.GetName()} - if err := r.Client.Get(context.Background(), key, m); err != nil { - if !apierrors.IsNotFound(err) { - klog.Errorf("Unable to retrieve Machine %q for possible MachineSet adoption: %v", key, err) - } + m, ok := o.Object.(*clusterv1.Machine) + if !ok { + klog.Errorf("expected a Machine but got a %T", o.Object) return nil } @@ -497,6 +522,6 @@ func (r *ReconcileMachineSet) MachineToMachineSets(o handler.MapObject) []reconc return result } -func shouldAdopt(ms *v1alpha1.MachineSet) bool { - return !util.HasOwner(ms.OwnerReferences, v1alpha1.SchemeGroupVersion.String(), []string{"MachineDeployment", "Cluster"}) +func shouldAdopt(ms *clusterv1.MachineSet) bool { + return !util.HasOwner(ms.OwnerReferences, clusterv1.GroupVersion.String(), []string{"MachineDeployment", "Cluster"}) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller_test.go index 7247de1df2..5f1cbba71c 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_controller_test.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -33,21 +33,21 @@ import ( var _ reconcile.Reconciler = &ReconcileMachineSet{} func TestMachineSetToMachines(t *testing.T) { - machineSetList := &v1alpha1.MachineSetList{ + machineSetList := &v1alpha2.MachineSetList{ TypeMeta: metav1.TypeMeta{ Kind: "MachineSetList", }, - Items: []v1alpha1.MachineSet{ + Items: []v1alpha2.MachineSet{ { ObjectMeta: metav1.ObjectMeta{ Name: "withMatchingLabels", Namespace: "test", }, - Spec: v1alpha1.MachineSetSpec{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, }, @@ -55,7 +55,7 @@ func TestMachineSetToMachines(t *testing.T) { }, } controller := true - m := v1alpha1.Machine{ + m := v1alpha2.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", }, @@ -63,7 +63,7 @@ func TestMachineSetToMachines(t *testing.T) { Name: "withOwnerRef", Namespace: "test", Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, OwnerReferences: []metav1.OwnerReference{ { @@ -74,7 +74,7 @@ func TestMachineSetToMachines(t *testing.T) { }, }, } - m2 := v1alpha1.Machine{ + m2 := v1alpha2.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", }, @@ -82,11 +82,11 @@ func TestMachineSetToMachines(t *testing.T) { Name: "noOwnerRefNoLabels", Namespace: "test", Labels: map[string]string{ - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, } - m3 := v1alpha1.Machine{ + m3 := v1alpha2.Machine{ TypeMeta: metav1.TypeMeta{ Kind: "Machine", }, @@ -95,12 +95,12 @@ func TestMachineSetToMachines(t *testing.T) { Namespace: "test", Labels: map[string]string{ "foo": "bar", - v1alpha1.MachineClusterLabelName: "test-cluster", + v1alpha2.MachineClusterLabelName: "test-cluster", }, }, } testsCases := []struct { - machine v1alpha1.Machine + machine v1alpha2.Machine mapObject handler.MapObject expected []reconcile.Request }{ @@ -132,7 +132,7 @@ func TestMachineSetToMachines(t *testing.T) { }, } - v1alpha1.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) r := &ReconcileMachineSet{ Client: fake.NewFakeClient(&m, &m2, &m3, machineSetList), scheme: scheme.Scheme, @@ -149,13 +149,15 @@ func TestMachineSetToMachines(t *testing.T) { func TestShouldExcludeMachine(t *testing.T) { controller := true testCases := []struct { - machineSet v1alpha1.MachineSet - machine v1alpha1.Machine + machineSet v1alpha2.MachineSet + machine v1alpha2.Machine expected bool }{ { - machineSet: v1alpha1.MachineSet{}, - machine: v1alpha1.Machine{ + machineSet: v1alpha2.MachineSet{ + ObjectMeta: metav1.ObjectMeta{UID: "1"}, + }, + machine: v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "withNoMatchingOwnerRef", Namespace: "test", @@ -164,6 +166,7 @@ func TestShouldExcludeMachine(t *testing.T) { Name: "Owner", Kind: "MachineSet", Controller: &controller, + UID: "not-1", }, }, }, @@ -171,8 +174,28 @@ func TestShouldExcludeMachine(t *testing.T) { expected: true, }, { - machineSet: v1alpha1.MachineSet{ - Spec: v1alpha1.MachineSetSpec{ + machineSet: v1alpha2.MachineSet{ + ObjectMeta: metav1.ObjectMeta{UID: "1"}, + }, + machine: v1alpha2.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "withMatchingOwnerRef", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + { + Name: "Owner", + Kind: "MachineSet", + Controller: 
&controller, + UID: "1", + }, + }, + }, + }, + expected: false, + }, + { + machineSet: v1alpha2.MachineSet{ + Spec: v1alpha2.MachineSetSpec{ Selector: metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -180,7 +203,7 @@ func TestShouldExcludeMachine(t *testing.T) { }, }, }, - machine: v1alpha1.Machine{ + machine: v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "withMatchingLabels", Namespace: "test", @@ -192,8 +215,8 @@ func TestShouldExcludeMachine(t *testing.T) { expected: false, }, { - machineSet: v1alpha1.MachineSet{}, - machine: v1alpha1.Machine{ + machineSet: v1alpha2.MachineSet{}, + machine: v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "withDeletionTimestamp", Namespace: "test", @@ -216,12 +239,12 @@ func TestShouldExcludeMachine(t *testing.T) { } func TestAdoptOrphan(t *testing.T) { - m := v1alpha1.Machine{ + m := v1alpha2.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "orphanMachine", }, } - ms := v1alpha1.MachineSet{ + ms := v1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{ Name: "adoptOrphanMachine", }, @@ -229,8 +252,8 @@ func TestAdoptOrphan(t *testing.T) { controller := true blockOwnerDeletion := true testCases := []struct { - machineSet v1alpha1.MachineSet - machine v1alpha1.Machine + machineSet v1alpha2.MachineSet + machine v1alpha2.Machine expected []metav1.OwnerReference }{ { @@ -238,7 +261,7 @@ func TestAdoptOrphan(t *testing.T) { machineSet: ms, expected: []metav1.OwnerReference{ { - APIVersion: v1alpha1.SchemeGroupVersion.String(), + APIVersion: v1alpha2.GroupVersion.String(), Kind: "MachineSet", Name: "adoptOrphanMachine", UID: "", @@ -249,7 +272,7 @@ func TestAdoptOrphan(t *testing.T) { }, } - v1alpha1.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) r := &ReconcileMachineSet{ Client: fake.NewFakeClient(&m), scheme: scheme.Scheme, diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_suite_test.go index 292d68d5c2..b1781e68b3 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_suite_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_suite_test.go @@ -22,21 +22,26 @@ import ( "path/filepath" "testing" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/cluster-api/pkg/controller/external" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var cfg *rest.Config func TestMain(m *testing.M) { t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + CRDs: []*apiextensionsv1beta1.CustomResourceDefinition{ + external.TestGenericInfrastructureCRD, + external.TestGenericInfrastructureTemplateCRD, + }, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, } - apis.AddToScheme(scheme.Scheme) + v1alpha2.AddToScheme(scheme.Scheme) var err error if cfg, err = t.Start(); err != nil { @@ -48,18 +53,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. 
-func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { - requests := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - return result, err - }) - return fn, requests -} - // StartTestManager adds recFn func StartTestManager(mgr manager.Manager, t *testing.T) chan struct{} { t.Helper() diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_test.go index 153a51e9aa..70460299c5 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/machineset_reconciler_test.go @@ -17,33 +17,50 @@ limitations under the License. package machineset import ( + "context" "testing" "time" - "golang.org/x/net/context" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + clusterv1alpha2 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var c client.Client - -var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} - -const timeout = time.Second * 5 +const timeout = time.Second * 10 func TestReconcile(t *testing.T) { + RegisterTestingT(t) + ctx := context.TODO() + replicas := int32(2) - instance := &clusterv1alpha1.MachineSet{ + version := "1.14.2" + instance := &clusterv1alpha2.MachineSet{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: clusterv1alpha1.MachineSetSpec{ + Spec: clusterv1alpha2.MachineSetSpec{ Replicas: &replicas, - Template: clusterv1alpha1.MachineTemplateSpec{ - Spec: clusterv1alpha1.MachineSpec{ - Versions: clusterv1alpha1.MachineVersionInfo{Kubelet: "1.10.3"}, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "label-1": "true", + }, + }, + Template: clusterv1alpha2.MachineTemplateSpec{ + ObjectMeta: clusterv1alpha2.ObjectMeta{ + Labels: map[string]string{ + "label-1": "true", + }, + }, + Spec: clusterv1alpha2.MachineSpec{ + Version: &version, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "InfrastructureMachineTemplate", + Name: "foo-template", + }, }, }, }, @@ -51,92 +68,128 @@ func TestReconcile(t *testing.T) { // Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a // channel when it is finished. - mgr, err := manager.New(cfg, manager.Options{}) - if err != nil { - t.Errorf("error creating new manager: %v", err) + mgr, err := manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) + Expect(err).To(BeNil()) + c := mgr.GetClient() + + // Create infrastructure template resource. 
+ infraResource := map[string]interface{}{ + "kind": "InfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha2", + "metadata": map[string]interface{}{}, + "spec": map[string]interface{}{ + "size": "3xlarge", + }, } - c = mgr.GetClient() + infraTmpl := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": infraResource, + }, + }, + } + infraTmpl.SetKind("InfrastructureMachineTemplate") + infraTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha2") + infraTmpl.SetName("foo-template") + infraTmpl.SetNamespace("default") + Expect(c.Create(ctx, infraTmpl)).To(BeNil()) r := newReconciler(mgr) - recFn, requests := SetupTestReconcile(r) - if err := add(mgr, recFn, r.MachineToMachineSets); err != nil { + if err := add(mgr, r, r.MachineToMachineSets); err != nil { t.Errorf("error adding controller to manager: %v", err) } defer close(StartTestManager(mgr, t)) // Create the MachineSet object and expect Reconcile to be called and the Machines to be created. - if err := c.Create(context.TODO(), instance); err != nil { - t.Errorf("error creating instance: %v", err) - } - defer c.Delete(context.TODO(), instance) - select { - case recv := <-requests: - if recv != expectedRequest { - t.Error("received request does not match expected request") - } - case <-time.After(timeout): - t.Error("timed out waiting for request") - } + Expect(c.Create(ctx, instance)).To(BeNil()) + defer c.Delete(ctx, instance) - machines := &clusterv1alpha1.MachineList{} + machines := &clusterv1alpha2.MachineList{} - // TODO(joshuarubin) there seems to be a race here. If expectInt sleeps - // briefly, even 10ms, the number of replicas is 4 and not 2 as expected - expectInt(t, int(replicas), func(ctx context.Context) int { - if err := c.List(ctx, &client.ListOptions{}, machines); err != nil { + // Verify that we have 2 replicas. + Eventually(func() int { + if err := c.List(ctx, machines); err != nil { return -1 } return len(machines.Items) - }) - - // Verify that each machine has the desired kubelet version. - for _, m := range machines.Items { - if k := m.Spec.Versions.Kubelet; k != "1.10.3" { - t.Errorf("kubelet was %q not '1.10.3'", k) + }, timeout).Should(BeEquivalentTo(replicas)) + + // Try to delete 1 machine and check the MachineSet scales back up. + machineToBeDeleted := machines.Items[0] + Expect(c.Delete(ctx, &machineToBeDeleted)).To(BeNil()) + + // Verify that the Machine has been deleted. + Eventually(func() bool { + key := client.ObjectKey{Name: machineToBeDeleted.Name, Namespace: machineToBeDeleted.Namespace} + if err := c.Get(ctx, key, &machineToBeDeleted); apierrors.IsNotFound(err) { + // The Machine Controller usually deletes external references upon Machine deletion. + // Replicate the logic here to make sure there are no leftovers. + iref := &unstructured.Unstructured{Object: infraResource} + iref.SetName(machineToBeDeleted.Spec.InfrastructureRef.Name) + iref.SetNamespace("default") + Expect(r.Delete(ctx, iref)).To(BeNil()) + return true } - } + return false + }, timeout).Should(BeTrue()) - // Delete a Machine and expect Reconcile to be called to replace it. - m := machines.Items[0] - if err := c.Delete(context.TODO(), &m); err != nil { - t.Errorf("error deleting machine: %v", err) - } - select { - case recv := <-requests: - if recv != expectedRequest { - t.Error("received request does not match expected request") + // Verify that we have 2 replicas. 
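The rewritten test drops the wrapped reconciler and its request channel; instead it creates real objects through the manager's client and polls observable state with Gomega, as in the replica checks above. A trimmed sketch of that polling pattern, with the namespace and timeout as illustrative values.

import (
	"context"
	"testing"
	"time"

	. "github.com/onsi/gomega"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForReplicas keeps listing Machines until the controller has created the expected number.
func waitForReplicas(t *testing.T, c client.Client, want int32) {
	RegisterTestingT(t)
	ctx := context.TODO()
	Eventually(func() int {
		machines := &clusterv1.MachineList{}
		if err := c.List(ctx, machines, client.InNamespace("default")); err != nil {
			return -1
		}
		return len(machines.Items)
	}, 10*time.Second).Should(BeEquivalentTo(want))
}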
+ Eventually(func() (ready int) { + if err := c.List(ctx, machines); err != nil { + return -1 } - case <-time.After(timeout): - t.Error("timed out waiting for request") - } + return len(machines.Items) + }, timeout).Should(BeEquivalentTo(replicas)) - // TODO (robertbailey): Figure out why the control loop isn't working as expected. - /* - g.Eventually(func() int { - if err := c.List(context.TODO(), &client.ListOptions{}, machines); err != nil { - return -1 - } - return len(machines.Items) - }, timeout).Should(gomega.BeEquivalentTo(replicas)) - */ -} + // Verify that each machine has the desired kubelet version, + // create a fake node in Ready state, update NodeRef, and wait for a reconciliation request. + for _, m := range machines.Items { + Expect(m.Spec.Version).ToNot(BeNil()) + Expect(*m.Spec.Version).To(BeEquivalentTo("1.14.2")) + + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + Expect(c.Create(ctx, node)) -func expectInt(t *testing.T, expect int, fn func(context.Context) int) { - t.Helper() + node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}) + Expect(c.Status().Update(ctx, node)).To(BeNil()) - ctx, cancel := context.WithTimeout(context.TODO(), timeout) - defer cancel() + m.Status.NodeRef = &corev1.ObjectReference{ + APIVersion: node.APIVersion, + Kind: node.Kind, + Name: node.Name, + UID: node.UID, + } + Expect(c.Status().Update(ctx, &m)).To(BeNil()) + } - intCh := make(chan int) + // Verify that we have N=replicas infrastructure references. + infraConfigs := &unstructured.UnstructuredList{} + infraConfigs.SetKind(infraResource["kind"].(string)) + infraConfigs.SetAPIVersion(infraResource["apiVersion"].(string)) + Eventually(func() int { + if err := c.List(ctx, infraConfigs, client.InNamespace("default")); err != nil { + return -1 + } + return len(infraConfigs.Items) + }, timeout).Should(BeEquivalentTo(replicas)) - go func() { intCh <- fn(ctx) }() + // Verify that all Machines are Ready. 
+ Eventually(func() int32 { + key := client.ObjectKey{Name: instance.Name, Namespace: instance.Namespace} + if err := c.Get(ctx, key, instance); err != nil { + return -1 + } + return instance.Status.AvailableReplicas + }, timeout).Should(BeEquivalentTo(replicas)) - select { - case n := <-intCh: - if n != expect { - t.Errorf("go unexpectef value %d, expected %d", n, expect) + Eventually(func() int { + if err := c.List(ctx, infraConfigs, client.InNamespace("default")); err != nil { + return -1 } - case <-ctx.Done(): - t.Errorf("timed out waiting for value: %d", expect) - } + return len(infraConfigs.Items) + }, timeout).Should(BeEquivalentTo(replicas)) } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go index dc475e3ffd..269efe1b7f 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machineset/status.go @@ -24,7 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" "sigs.k8s.io/cluster-api/pkg/controller/remote" "sigs.k8s.io/controller-runtime/pkg/client" @@ -35,7 +35,7 @@ const ( statusUpdateRetries = 1 ) -func (c *ReconcileMachineSet) calculateStatus(ms *v1alpha1.MachineSet, filteredMachines []*v1alpha1.Machine) v1alpha1.MachineSetStatus { +func (c *ReconcileMachineSet) calculateStatus(ms *v1alpha2.MachineSet, filteredMachines []*v1alpha2.Machine) v1alpha2.MachineSetStatus { newStatus := ms.Status // Count the number of machines that have labels matching the labels of the machine @@ -85,7 +85,7 @@ func (c *ReconcileMachineSet) calculateStatus(ms *v1alpha1.MachineSet, filteredM } // updateMachineSetStatus attempts to update the Status.Replicas of the given MachineSet, with a single GET/PUT retry. -func updateMachineSetStatus(c client.Client, ms *v1alpha1.MachineSet, newStatus v1alpha1.MachineSetStatus) (*v1alpha1.MachineSet, error) { +func updateMachineSetStatus(c client.Client, ms *v1alpha2.MachineSet, newStatus v1alpha2.MachineSetStatus) (*v1alpha2.MachineSet, error) { // This is the steady state. It happens when the MachineSet doesn't have any expectations, since // we do a periodic relist every 10 minutes. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. @@ -136,7 +136,7 @@ func updateMachineSetStatus(c client.Client, ms *v1alpha1.MachineSet, newStatus return nil, updateErr } -func (c *ReconcileMachineSet) getMachineNode(cluster *v1alpha1.Cluster, machine *v1alpha1.Machine) (*corev1.Node, error) { +func (c *ReconcileMachineSet) getMachineNode(cluster *v1alpha2.Cluster, machine *v1alpha2.Machine) (*corev1.Node, error) { if cluster == nil { // Try to retrieve the Node from the local cluster, if no Cluster reference is found. 
node := &corev1.Node{} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/BUILD.bazel deleted file mode 100644 index 5a855c4171..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/BUILD.bazel +++ /dev/null @@ -1,51 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "node.go", - "node_controller.go", - ], - importpath = "sigs.k8s.io/cluster-api/pkg/controller/node", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/controller/noderefutil:go_default_library", - "//pkg/util:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/controller:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/source:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "node_reconciler_suite_test.go", - "node_reconciler_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/apis:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node.go deleted file mode 100644 index b2c7230086..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package node - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" - "sigs.k8s.io/cluster-api/pkg/util" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - // MachineAnnotationKey annotation used to link node and machine resource - MachineAnnotationKey = "cluster.k8s.io/machine" -) - -// We are using "cluster.k8s.io/machine" annotation to link node and machine resource. The "cluster.k8s.io/machine" -// annotation is an implementation detail of how the two objects can get linked together, but it is -// not required behavior. However, in the event that a Machine.Spec update requires replacing the -// Node, this can allow for faster turn-around time by allowing a new Node to be created with a new -// name while the old node is being deleted. -// -// Currently, these annotations are added by the node itself as part of its -// bootup script after "kubeadm join" succeeds. -func (c *ReconcileNode) link(node *corev1.Node) error { - nodeReady := noderefutil.IsNodeReady(node) - - // skip update if cached and no change in readiness. - if c.linkedNodes[node.ObjectMeta.Name] { - if cachedReady, ok := c.cachedReadiness[node.ObjectMeta.Name]; ok && cachedReady == nodeReady { - return nil - } - } - - val, ok := node.ObjectMeta.Annotations[MachineAnnotationKey] - if !ok { - return nil - } - - namespace, mach, err := cache.SplitMetaNamespaceKey(val) - if err != nil { - klog.Errorf("Machine annotation format is incorrect %v: %v\n", val, err) - return err - } - namespace = util.GetNamespaceOrDefault(namespace) - key := client.ObjectKey{Namespace: namespace, Name: mach} - - machine := &v1alpha1.Machine{} - if err = c.Client.Get(context.Background(), key, machine); err != nil { - klog.Errorf("Error getting machine %v: %v\n", mach, err) - return err - } - - t := metav1.Now() - machine.Status.LastUpdated = &t - machine.Status.NodeRef = objectRef(node) - if err = c.Client.Status().Update(context.Background(), machine); err != nil { - klog.Errorf("Error updating machine to link to node: %v\n", err) - } else { - klog.Infof("Successfully linked machine %s to node %s\n", - machine.ObjectMeta.Name, node.ObjectMeta.Name) - c.linkedNodes[node.ObjectMeta.Name] = true - c.cachedReadiness[node.ObjectMeta.Name] = nodeReady - } - return err -} - -func (c *ReconcileNode) unlink(node *corev1.Node) error { - val, ok := node.ObjectMeta.Annotations[MachineAnnotationKey] - if !ok { - return nil - } - - namespace, mach, err := cache.SplitMetaNamespaceKey(val) - if err != nil { - klog.Errorf("Machine annotation format is incorrect %v: %v\n", val, err) - return err - } - namespace = util.GetNamespaceOrDefault(namespace) - key := client.ObjectKey{Namespace: namespace, Name: mach} - - machine := &v1alpha1.Machine{} - if err = c.Client.Get(context.Background(), key, machine); err != nil { - klog.Errorf("Error getting machine %v: %v\n", mach, err) - return err - } - - // This machine has no link to remove - if machine.Status.NodeRef == nil { - return nil - } - - // This machine was linked to a different node, don't unlink them - if machine.Status.NodeRef.Name != node.ObjectMeta.Name { - klog.Warningf("Node (%v) is tring to unlink machine (%v) which is linked with node (%v).", - node.ObjectMeta.Name, machine.ObjectMeta.Name, machine.Status.NodeRef.Name) - return nil - } - - t := metav1.Now() - 
machine.Status.LastUpdated = &t - machine.Status.NodeRef = nil - if err = c.Client.Status().Update(context.Background(), machine); err != nil { - klog.Errorf("Error updating machine %s to unlink node %s: %v\n", - machine.ObjectMeta.Name, node.ObjectMeta.Name, err) - } else { - klog.Infof("Successfully unlinked node %s from machine %s\n", - node.ObjectMeta.Name, machine.ObjectMeta.Name) - delete(c.cachedReadiness, node.ObjectMeta.Name) - delete(c.linkedNodes, node.ObjectMeta.Name) - } - return err -} - -func objectRef(node *corev1.Node) *corev1.ObjectReference { - return &corev1.ObjectReference{ - Kind: "Node", - Name: node.ObjectMeta.Name, - UID: node.UID, - } -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_controller.go deleted file mode 100644 index d66fee4e1c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_controller.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package node - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -// Add creates a new Node Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager) error { - return add(mgr, newReconciler(mgr)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileNode{ - Client: mgr.GetClient(), - scheme: mgr.GetScheme(), - linkedNodes: map[string]bool{}, - cachedReadiness: map[string]bool{}, - } -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("node-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to Node - err = c.Watch(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return err - } - - return nil -} - -var _ reconcile.Reconciler = &ReconcileNode{} - -// ReconcileNode reconciles a Node object -type ReconcileNode struct { - client.Client - scheme *runtime.Scheme - - linkedNodes map[string]bool - cachedReadiness map[string]bool -} - -// Reconcile reads that state of the cluster for a Node object and makes changes based on the state read -// and what is in the Node.Spec -// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machines,verbs=get;list;watch;create;update;patch;delete -func (r *ReconcileNode) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Fetch the Node instance - instance := &corev1.Node{} - err := r.Get(context.Background(), request.NamespacedName, instance) - if err != nil { - if errors.IsNotFound(err) { - // Object not found, return. Created objects are automatically garbage collected. - // For additional cleanup logic use finalizers. - klog.Errorf("Unable to retrieve Node %v from store: %v", request, err) - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - if instance.DeletionTimestamp.IsZero() { - return reconcile.Result{}, r.link(instance) - } - return reconcile.Result{}, r.unlink(instance) -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_suite_test.go deleted file mode 100644 index f93d36915f..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_suite_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
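
Aside, not part of the patch: the deleted node controller above links a Node to its Machine purely through the "cluster.k8s.io/machine" annotation, whose value is a namespace/name key written by the node's own bootstrap script after "kubeadm join". A condensed sketch of that lookup, using only calls that appear in the deleted link() and unlink() functions; the package and function names here are illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
	"sigs.k8s.io/cluster-api/pkg/util"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// machineAnnotation is the Node annotation written at bootstrap time
// ("cluster.k8s.io/machine" in the deleted controller above).
const machineAnnotation = "cluster.k8s.io/machine"

// machineForNode resolves the annotation value ("<namespace>/<name>") into
// the Machine object, mirroring the lookup in the deleted link()/unlink().
func machineForNode(ctx context.Context, c client.Client, node *corev1.Node) (*v1alpha1.Machine, error) {
	val, ok := node.Annotations[machineAnnotation]
	if !ok {
		return nil, nil // node not annotated; nothing to link
	}
	namespace, name, err := cache.SplitMetaNamespaceKey(val)
	if err != nil {
		return nil, err
	}
	namespace = util.GetNamespaceOrDefault(namespace)
	machine := &v1alpha1.Machine{}
	if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, machine); err != nil {
		return nil, err
	}
	return machine, nil
}

The controller then copies an ObjectReference for the Node into machine.Status.NodeRef, or clears it again when the Node is deleted.
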
-*/ - -package node - -import ( - "log" - "os" - "path/filepath" - "testing" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var cfg *rest.Config - -func TestMain(m *testing.M) { - t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, - } - apis.AddToScheme(scheme.Scheme) - - var err error - if cfg, err = t.Start(); err != nil { - log.Fatal(err) - } - - code := m.Run() - t.Stop() - os.Exit(code) -} - -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. -func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { - requests := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - return result, err - }) - return fn, requests -} - -// StartTestManager adds recFn -func StartTestManager(mgr manager.Manager, t *testing.T) chan struct{} { - t.Helper() - - stop := make(chan struct{}) - go func() { - if err := mgr.Start(stop); err != nil { - t.Fatalf("error starting test manager: %v", err) - } - }() - return stop -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_test.go deleted file mode 100644 index 304b1caa12..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/node/node_reconciler_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package node - -import ( - "testing" - "time" - - "golang.org/x/net/context" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var c client.Client - -var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo"}} - -const timeout = time.Second * 5 - -func TestReconcile(t *testing.T) { - instance := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} - - // Setup the Manager and Controller. Wrap the Controller Reconcile function so it writes each request to a - // channel when it is finished. 
- mgr, err := manager.New(cfg, manager.Options{}) - if err != nil { - t.Errorf("error creating new manager: %v", err) - } - c = mgr.GetClient() - - recFn, requests := SetupTestReconcile(newReconciler(mgr)) - if err := add(mgr, recFn); err != nil { - t.Errorf("error adding controller to manager: %v", err) - } - defer close(StartTestManager(mgr, t)) - - // Create the Node object and expect the Reconcile - err = c.Create(context.TODO(), instance) - // The instance object may not be a valid object because it might be missing some required fields. - // Please modify the instance object by adding required fields and then remove the following if statement. - if apierrors.IsInvalid(err) { - t.Logf("failed to create object, got an invalid object error: %v", err) - return - } - if err != nil { - t.Errorf("error creating instance: %v", err) - } - defer c.Delete(context.TODO(), instance) - select { - case recv := <-requests: - if recv != expectedRequest { - t.Error("received request does not match expected request") - } - case <-time.After(timeout): - t.Error("timed out waiting for request") - } - -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/BUILD.bazel deleted file mode 100644 index 88ac19a74c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/BUILD.bazel +++ /dev/null @@ -1,57 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["noderef_controller.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/controller/noderef", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/controller/noderefutil:go_default_library", - "//pkg/controller/remote:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/controller:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/source:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "noderef_controller_suite_test.go", - "noderef_controller_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/apis:go_default_library", - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/controller/noderefutil:go_default_library", - "//vendor/github.com/onsi/gomega:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - 
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/client/fake:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/manager:go_default_library", - "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go deleted file mode 100644 index ef9b0530bb..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderef - -import ( - "context" - "time" - - "github.com/pkg/errors" - apicorev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/record" - "k8s.io/klog" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/controller/noderefutil" - "sigs.k8s.io/cluster-api/pkg/controller/remote" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -// controllerName is the name of this controller -const controllerName = "noderef-controller" - -var ( - ErrNodeNotFound = errors.New("cannot find node with matching ProviderID") -) - -// Add creates a new NodeRef Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller -// and Start it when the Manager is Started. -func Add(mgr manager.Manager) error { - return add(mgr, newReconciler(mgr)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileNodeRef{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetRecorder(controllerName)} -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to Machines. 
- return c.Watch(&source.Kind{Type: &v1alpha1.Machine{}}, &handler.EnqueueRequestForObject{}) -} - -var _ reconcile.Reconciler = &ReconcileNodeRef{} - -// ReconcileNodeRef reconciles a Machine object to assign a NodeRef. -type ReconcileNodeRef struct { - client.Client - scheme *runtime.Scheme - recorder record.EventRecorder -} - -// Reconcile responds to Machine events to assign a NodeRef. -// +kubebuilder:rbac:groups=,resources=secrets,verbs=get;list;watch -func (r *ReconcileNodeRef) Reconcile(request reconcile.Request) (reconcile.Result, error) { - klog.Infof("Reconcile request for Machine %q in namespace %q", request.Name, request.Namespace) - ctx := context.Background() - - // Fetch the Machine instance. - machine := &v1alpha1.Machine{} - err := r.Get(ctx, request.NamespacedName, machine) - if err != nil { - if apierrors.IsNotFound(err) { - klog.V(2).Infof("Machine %q in namespace %q is not found, won't reconcile", machine.Name, machine.Namespace) - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - // Check that the Machine hasn't been deleted or in the process. - if !machine.DeletionTimestamp.IsZero() { - klog.V(2).Infof("Machine %q in namespace %q has been deleted, won't reconcile", machine.Name, machine.Namespace) - return reconcile.Result{}, nil - } - - // Check that the Machine doesn't already have a NodeRef. - if machine.Status.NodeRef != nil { - klog.V(2).Infof("Machine %q in namespace %q already has a NodeRef, won't reconcile", machine.Name, machine.Namespace) - return reconcile.Result{}, nil - } - - // Check that the Machine has a cluster label. - if machine.Labels[v1alpha1.MachineClusterLabelName] == "" { - klog.V(2).Infof("Machine %q in namespace %q doesn't specify %q label, won't reconcile", machine.Name, machine.Namespace, - v1alpha1.MachineClusterLabelName) - return reconcile.Result{}, nil - } - - // Check that the Machine has a valid Cluster associated with it. - cluster, err := r.getCluster(ctx, machine) - if err != nil { - if apierrors.IsNotFound(err) { - klog.Warningf("Cannot find a Cluster for Machine %q in namespace %q, won't reconcile", machine.Name, machine.Namespace) - return reconcile.Result{}, err - } - return reconcile.Result{}, err - } - - // Check that the Machine has a valid ProviderID. 
- if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { - klog.Warningf("Machine %q in namespace %q doesn't have a valid ProviderID, retrying later", machine.Name, machine.Namespace) - return reconcile.Result{RequeueAfter: 30 * time.Second}, nil - } - - result, err := r.reconcile(ctx, cluster, machine) - if err != nil { - if err == ErrNodeNotFound { - klog.Warningf("Failed to assign NodeRef to Machine %q: cannot find a matching Node in namespace %q, retrying later", machine.Name, machine.Namespace) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - - klog.Errorf("Failed to assign NodeRef to Machine %q: %v", request.NamespacedName, err) - r.recorder.Event(machine, apicorev1.EventTypeWarning, "FailedSetNodeRef", err.Error()) - return reconcile.Result{}, err - } - - klog.Infof("Set Machine's (%q in namespace %q) NodeRef to %q", machine.Name, machine.Namespace, machine.Status.NodeRef.Name) - r.recorder.Event(machine, apicorev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name) - return result, nil -} - -func (r *ReconcileNodeRef) reconcile(ctx context.Context, cluster *v1alpha1.Cluster, machine *v1alpha1.Machine) (reconcile.Result, error) { - providerID, err := noderefutil.NewProviderID(*machine.Spec.ProviderID) - if err != nil { - return reconcile.Result{}, err - } - - clusterClient, err := remote.NewClusterClient(r.Client, cluster) - if err != nil { - return reconcile.Result{}, err - } - - corev1Client, err := clusterClient.CoreV1() - if err != nil { - return reconcile.Result{}, err - } - - // Get the Node reference. - nodeRef, err := r.getNodeReference(corev1Client, providerID) - if err != nil { - return reconcile.Result{}, err - } - - // Update Machine. - machine.Status.NodeRef = nodeRef - if err := r.Client.Status().Update(ctx, machine); err != nil { - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil -} - -func (r *ReconcileNodeRef) getCluster(ctx context.Context, machine *v1alpha1.Machine) (*v1alpha1.Cluster, error) { - cluster := &v1alpha1.Cluster{} - key := client.ObjectKey{ - Namespace: machine.Namespace, - Name: machine.Labels[v1alpha1.MachineClusterLabelName], - } - - if err := r.Client.Get(ctx, key, cluster); err != nil { - return nil, err - } - - return cluster, nil -} - -func (r *ReconcileNodeRef) getNodeReference(client corev1.NodesGetter, providerID *noderefutil.ProviderID) (*apicorev1.ObjectReference, error) { - listOpt := metav1.ListOptions{} - - for { - nodeList, err := client.Nodes().List(listOpt) - if err != nil { - return nil, err - } - - for _, node := range nodeList.Items { - nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID) - if err != nil { - klog.V(3).Infof("Failed to parse ProviderID for Node %q: %v", node.Name, err) - continue - } - - if providerID.Equals(nodeProviderID) { - return &apicorev1.ObjectReference{ - Kind: node.Kind, - APIVersion: node.APIVersion, - Name: node.Name, - UID: node.UID, - }, nil - } - } - - listOpt.Continue = nodeList.Continue - if listOpt.Continue == "" { - break - } - } - - return nil, ErrNodeNotFound -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_suite_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_suite_test.go deleted file mode 100644 index eeb7833edd..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/noderef/noderef_controller_suite_test.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
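
Aside, not part of the patch: the heart of the deleted noderef controller is matching the Machine's Spec.ProviderID against the ProviderID reported by Nodes in the workload cluster. A condensed sketch of that comparison, assuming the node list has already been fetched through the remote cluster client; only the noderefutil calls shown above are used, and names are illustrative:

package example

import (
	"github.com/pkg/errors"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/cluster-api/pkg/controller/noderefutil"
)

// findNodeByProviderID returns the Node whose ProviderID matches the
// Machine's, mirroring the loop in the deleted getNodeReference above.
func findNodeByProviderID(nodes []corev1.Node, machineProviderID string) (*corev1.Node, error) {
	want, err := noderefutil.NewProviderID(machineProviderID)
	if err != nil {
		return nil, err
	}
	for i := range nodes {
		got, err := noderefutil.NewProviderID(nodes[i].Spec.ProviderID)
		if err != nil {
			// Nodes without a parseable ProviderID are simply skipped.
			continue
		}
		if want.Equals(got) {
			return &nodes[i], nil
		}
	}
	return nil, errors.New("cannot find node with matching ProviderID")
}
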
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package noderef - -import ( - stdlog "log" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/onsi/gomega" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/cluster-api/pkg/apis" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var cfg *rest.Config - -func TestMain(m *testing.M) { - t := &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, - } - apis.AddToScheme(scheme.Scheme) - - var err error - if cfg, err = t.Start(); err != nil { - stdlog.Fatal(err) - } - - code := m.Run() - t.Stop() - os.Exit(code) -} - -// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and -// writes the request to requests after Reconcile is finished. -func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { - requests := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - result, err := inner.Reconcile(req) - requests <- req - return result, err - }) - return fn, requests -} - -// StartTestManager adds recFn -func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) { - stop := make(chan struct{}) - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred()) - }() - return stop, wg -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/BUILD.bazel index d4ce571ab1..dc46472ef7 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/BUILD.bazel @@ -9,7 +9,7 @@ go_library( importpath = "sigs.k8s.io/cluster-api/pkg/controller/remote", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -28,7 +28,7 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster.go index b47389b444..db39b1b0e4 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster.go @@ -21,7 +21,7 @@ import ( corev1 "k8s.io/client-go/kubernetes/typed/core/v1" restclient 
"k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -35,11 +35,11 @@ type ClusterClient interface { // clusterClient is a helper struct to connect to remote workload clusters. type clusterClient struct { restConfig *restclient.Config - cluster *v1alpha1.Cluster + cluster *v1alpha2.Cluster } // NewClusterClient creates a new ClusterClient. -func NewClusterClient(c client.Client, cluster *v1alpha1.Cluster) (ClusterClient, error) { +func NewClusterClient(c client.Client, cluster *v1alpha2.Cluster) (ClusterClient, error) { secret, err := GetKubeConfigSecret(c, cluster.Name, cluster.Namespace) if err != nil { return nil, errors.Wrapf(err, "failed to retrieve kubeconfig secret for Cluster %q in namespace %q", diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster_test.go index 8412ee62ca..597f9dea2d 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/remote/cluster_test.go @@ -22,26 +22,26 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) var ( - clusterWithValidKubeConfig = &v1alpha1.Cluster{ + clusterWithValidKubeConfig = &v1alpha2.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test1", Namespace: "test", }, } - clusterWithInvalidKubeConfig = &v1alpha1.Cluster{ + clusterWithInvalidKubeConfig = &v1alpha2.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test2", Namespace: "test", }, } - clusterWithNoKubeConfig = &v1alpha1.Cluster{ + clusterWithNoKubeConfig = &v1alpha2.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test3", Namespace: "test", diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/errors/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/errors/BUILD.bazel index 1488ea7c95..f91f59551a 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/errors/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/errors/BUILD.bazel @@ -4,13 +4,13 @@ go_library( name = "go_default_library", srcs = [ "clusters.go", + "consts.go", + "controllers.go", "deployer.go", "machines.go", + "pointer.go", ], importpath = "sigs.k8s.io/cluster-api/pkg/errors", visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/common:go_default_library", - "//vendor/github.com/pkg/errors:go_default_library", - ], + deps = ["//vendor/github.com/pkg/errors:go_default_library"], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/errors/clusters.go b/vendor/sigs.k8s.io/cluster-api/pkg/errors/clusters.go index f6a0f7809c..5a9fd6b61e 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/errors/clusters.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/errors/clusters.go @@ -18,8 +18,6 @@ package errors import ( "fmt" - - commonerrors "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" ) // A more descriptive kind of error that represents an error condition that @@ -27,7 +25,7 @@ import ( // enum-style constants meant to be interpreted by clusters. The "Message" // field is meant to be read by humans. 
type ClusterError struct { - Reason commonerrors.ClusterStatusError + Reason ClusterStatusError Message string } @@ -41,21 +39,21 @@ func (e *ClusterError) Error() string { func InvalidClusterConfiguration(format string, args ...interface{}) *ClusterError { return &ClusterError{ - Reason: commonerrors.InvalidConfigurationClusterError, + Reason: InvalidConfigurationClusterError, Message: fmt.Sprintf(format, args...), } } func CreateCluster(format string, args ...interface{}) *ClusterError { return &ClusterError{ - Reason: commonerrors.CreateClusterError, + Reason: CreateClusterError, Message: fmt.Sprintf(format, args...), } } func DeleteCluster(format string, args ...interface{}) *ClusterError { return &ClusterError{ - Reason: commonerrors.DeleteClusterError, + Reason: DeleteClusterError, Message: fmt.Sprintf(format, args...), } } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/consts.go b/vendor/sigs.k8s.io/cluster-api/pkg/errors/consts.go similarity index 93% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/consts.go rename to vendor/sigs.k8s.io/cluster-api/pkg/errors/consts.go index dfa87062d9..de94f507a4 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/consts.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/errors/consts.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package common +package errors // Constants aren't automatically generated for unversioned packages. // Instead share the same constant for all versioned packages @@ -111,11 +111,3 @@ const ( // Example: the ProviderSpec specifies an instance type that doesn't exist. InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" ) - -type MachineDeploymentStrategyType string - -const ( - // Replace the old MachineSet by new one using rolling update - // i.e. gradually scale down the old MachineSet and scale up the new one. - RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/error/requeue_error.go b/vendor/sigs.k8s.io/cluster-api/pkg/errors/controllers.go similarity index 86% rename from vendor/sigs.k8s.io/cluster-api/pkg/controller/error/requeue_error.go rename to vendor/sigs.k8s.io/cluster-api/pkg/errors/controllers.go index a559529adb..be4d6e7461 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/error/requeue_error.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/errors/controllers.go @@ -14,11 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package error +package errors import ( "fmt" "time" + + "github.com/pkg/errors" ) // HasRequeueAfterError represents that an actuator managed object should @@ -47,3 +49,9 @@ func (e *RequeueAfterError) Error() string { func (e *RequeueAfterError) GetRequeueAfter() time.Duration { return e.RequeueAfter } + +// IsRequeueAfter returns true if the error satisfies the interface HasRequeueAfterError. 
+func IsRequeueAfter(err error) bool { + _, ok := errors.Cause(err).(HasRequeueAfterError) + return ok +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/errors/machines.go b/vendor/sigs.k8s.io/cluster-api/pkg/errors/machines.go index b78877b345..8a499050be 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/errors/machines.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/errors/machines.go @@ -18,8 +18,6 @@ package errors import ( "fmt" - - commonerrors "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" ) // A more descriptive kind of error that represents an error condition that @@ -27,7 +25,7 @@ import ( // enum-style constants meant to be interpreted by machines. The "Message" // field is meant to be read by humans. type MachineError struct { - Reason commonerrors.MachineStatusError + Reason MachineStatusError Message string } @@ -41,28 +39,28 @@ func (e *MachineError) Error() string { func InvalidMachineConfiguration(msg string, args ...interface{}) *MachineError { return &MachineError{ - Reason: commonerrors.InvalidConfigurationMachineError, + Reason: InvalidConfigurationMachineError, Message: fmt.Sprintf(msg, args...), } } func CreateMachine(msg string, args ...interface{}) *MachineError { return &MachineError{ - Reason: commonerrors.CreateMachineError, + Reason: CreateMachineError, Message: fmt.Sprintf(msg, args...), } } func UpdateMachine(msg string, args ...interface{}) *MachineError { return &MachineError{ - Reason: commonerrors.UpdateMachineError, + Reason: UpdateMachineError, Message: fmt.Sprintf(msg, args...), } } func DeleteMachine(msg string, args ...interface{}) *MachineError { return &MachineError{ - Reason: commonerrors.DeleteMachineError, + Reason: DeleteMachineError, Message: fmt.Sprintf(msg, args...), } } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/addtoscheme_cluster_v1alpha1.go b/vendor/sigs.k8s.io/cluster-api/pkg/errors/pointer.go similarity index 64% rename from vendor/sigs.k8s.io/cluster-api/pkg/apis/addtoscheme_cluster_v1alpha1.go rename to vendor/sigs.k8s.io/cluster-api/pkg/errors/pointer.go index 5c1ec47ea6..2335cfaeff 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/addtoscheme_cluster_v1alpha1.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/errors/pointer.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apis +package errors -import ( - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) +// MachineStatusErrorPtr converts a MachineStatusError to a pointer. +func MachineStatusErrorPtr(v MachineStatusError) *MachineStatusError { + return &v +} -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +// ClusterStatusErrorPtr converts a MachineStatusError to a pointer. 
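
Aside, not part of the patch: with the requeue error moved into pkg/errors, provider code can return a RequeueAfterError to ask for a delayed retry, and callers can detect it with the IsRequeueAfter helper added above. A minimal sketch, assuming the struct can be built directly with its RequeueAfter field (as implied by GetRequeueAfter above); names are illustrative:

package example

import (
	"time"

	capierrors "sigs.k8s.io/cluster-api/pkg/errors"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcileInstance pretends the backing instance is still building and
// asks the controller to come back later.
func reconcileInstance() error {
	return &capierrors.RequeueAfterError{RequeueAfter: 30 * time.Second}
}

// toResult converts a reconcile error into a controller-runtime result,
// treating requeue errors as a delayed retry rather than a failure.
func toResult(err error) (reconcile.Result, error) {
	if err == nil {
		return reconcile.Result{}, nil
	}
	if capierrors.IsRequeueAfter(err) {
		return reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
	}
	return reconcile.Result{}, err
}
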
+func ClusterStatusErrorPtr(v ClusterStatusError) *ClusterStatusError { + return &v } diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/BUILD.bazel deleted file mode 100644 index 0db52f6516..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["kubeadm.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/kubeadm", - visibility = ["//visibility:public"], - deps = ["//pkg/cmdrunner:go_default_library"], -) - -go_test( - name = "go_default_test", - srcs = ["kubeadm_test.go"], - embed = [":go_default_library"], - deps = ["//pkg/testcmdrunner:go_default_library"], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm.go b/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm.go deleted file mode 100644 index 68730f583c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeadm - -import ( - "strings" - "time" - - "sigs.k8s.io/cluster-api/pkg/cmdrunner" -) - -// The purpose of Kubeadm and this file is to provide a unit tested wrapper around the 'kubeadm' exec command. Higher -// level, application specific functionality built on top of kubeadm should be be in another location. -type Kubeadm struct { - runner cmdrunner.Runner -} - -// see https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-token/ for an explanation of the parameters -type TokenCreateParams struct { - Config string - Description string - Groups []string - Help bool - KubeConfig string - PrintJoinCommand bool - TTL time.Duration - Usages []string -} - -func NewWithRunner(runner cmdrunner.Runner) *Kubeadm { - return &Kubeadm{ - runner: runner, - } -} - -func New() *Kubeadm { - return NewWithRunner(cmdrunner.New()) -} - -// TokenCreate execs `kubeadm token create` with the appropriate flags added by interpreting the params argument. The output of -// `kubeadm token create` is returned in full, including the terminating newline, without any modification. -func (k *Kubeadm) TokenCreate(params TokenCreateParams) (string, error) { - args := []string{"token", "create"} - args = appendStringParamIfPresent(args, "--config", params.Config) - args = appendStringParamIfPresent(args, "--description", params.Description) - args = appendStringSliceIfValid(args, "--groups", params.Groups) - args = appendFlagIfTrue(args, "--help", params.Help) - args = appendStringParamIfPresent(args, "--kubeconfig", params.KubeConfig) - args = appendFlagIfTrue(args, "--print-join-command", params.PrintJoinCommand) - if params.TTL != time.Duration(0) { - args = append(args, "--ttl") - args = append(args, params.TTL.String()) - } - args = appendStringSliceIfValid(args, "--usages", params.Usages) - return k.runner.CombinedOutput("kubeadm", args...) 
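
Aside, not part of the patch: the deleted Kubeadm wrapper above was typically used to mint short-lived bootstrap tokens. A small usage sketch built only from the constructor and parameters shown above; names are illustrative:

package example

import (
	"fmt"
	"time"

	"sigs.k8s.io/cluster-api/pkg/kubeadm"
)

// printJoinCommand creates a 15-minute token and prints the full
// "kubeadm join ..." command produced by kubeadm itself.
func printJoinCommand() error {
	kadm := kubeadm.New()
	out, err := kadm.TokenCreate(kubeadm.TokenCreateParams{
		TTL:              15 * time.Minute,
		PrintJoinCommand: true,
	})
	if err != nil {
		return err
	}
	fmt.Print(out) // output is returned unmodified, trailing newline included
	return nil
}
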
-} - -func appendFlagIfTrue(args []string, paramName string, value bool) []string { - if value { - return append(args, paramName) - } - return args -} - -func appendStringParamIfPresent(args []string, paramName string, value string) []string { - if value == "" { - return args - } - args = append(args, paramName) - return append(args, value) -} - -func appendStringSliceIfValid(args []string, paramName string, values []string) []string { - if len(values) == 0 { - return args - } - args = append(args, paramName) - return append(args, toStringSlice(values)) -} - -func toStringSlice(args []string) string { - return strings.Join(args, ":") -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm_test.go deleted file mode 100644 index 65ede1fd4b..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/kubeadm/kubeadm_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubeadm_test - -import ( - "errors" - "strings" - "testing" - "time" - - "sigs.k8s.io/cluster-api/pkg/kubeadm" - "sigs.k8s.io/cluster-api/pkg/testcmdrunner" -) - -var ( - joinCommand = "kubeadm join --token a53d73.21029eb48b9002d0 35.199.169.93:443 --discovery-token-ca-cert-hash sha256:3bb9ee3aa3cee9898b85523e8dd6921752e07e3053453084c8d26d40d929a132" - errorExitCode = errors.New("kubeadm error") - errorMessage = "error message" -) - -func TestTokenCreateParameters(t *testing.T) { - var tests = []struct { - name string - err error - output string - params kubeadm.TokenCreateParams - }{ - {"empty params", nil, "kubeadm token create", kubeadm.TokenCreateParams{}}, - {"config", nil, "kubeadm token create --config /my/path/to/kubeadm-config", kubeadm.TokenCreateParams{Config: "/my/path/to/kubeadm-config"}}, - {"description", nil, "kubeadm token create --description my custom description", kubeadm.TokenCreateParams{Description: "my custom description"}}, - {"groups", nil, "kubeadm token create --groups system:bootstrappers:kubeadm:default-node-token", kubeadm.TokenCreateParams{Groups: []string{"system", "bootstrappers", "kubeadm", "default-node-token"}}}, - {"help", nil, "kubeadm token create --help", kubeadm.TokenCreateParams{Help: true}}, - {"print join command", nil, "kubeadm token create --print-join-command", kubeadm.TokenCreateParams{PrintJoinCommand: true}}, - {"ttl 24 hours", nil, "kubeadm token create --ttl 24h0m0s", kubeadm.TokenCreateParams{TTL: toDuration(24, 0, 0)}}, - {"ttl 55 minutes", nil, "kubeadm token create --ttl 55m0s", kubeadm.TokenCreateParams{TTL: toDuration(0, 55, 0)}}, - {"ttl 9 seconds", nil, "kubeadm token create --ttl 9s", kubeadm.TokenCreateParams{TTL: toDuration(0, 0, 9)}}, - {"ttl 1 second", nil, "kubeadm token create --ttl 1s", kubeadm.TokenCreateParams{TTL: toDuration(0, 0, 1)}}, - {"ttl 16 hours, 38 minutes, 2 seconds", nil, "kubeadm token create --ttl 16h38m2s", kubeadm.TokenCreateParams{TTL: toDuration(16, 38, 2)}}, - {"usages", nil, "kubeadm token create --usages 
signing:authentication", kubeadm.TokenCreateParams{Usages: []string{"signing", "authentication"}}}, - {"all", nil, "kubeadm token create --config /my/config --description my description --groups bootstrappers --help --print-join-command --ttl 1h1m1s --usages authentication", - kubeadm.TokenCreateParams{Config: "/my/config", Description: "my description", Groups: []string{"bootstrappers"}, Help: true, PrintJoinCommand: true, TTL: toDuration(1, 1, 1), Usages: []string{"authentication"}}}, - } - kadm := kubeadm.NewWithRunner(testcmdrunner.NewOrDie(t, echoCallback)) - for _, tst := range tests { - output, err := kadm.TokenCreate(tst.params) - if err != tst.err { - t.Errorf("test case '%v', unexpected error: got '%v', want '%v'", tst.name, err, tst.err) - } - if output != tst.output { - t.Errorf("test case '%v', unexpected output: got '%v', want '%v'", tst.name, output, tst.output) - } - } -} - -func TestTokenCreateReturnsUnmodifiedOutput(t *testing.T) { - kadm := kubeadm.NewWithRunner(testcmdrunner.NewOrDie(t, tokenCallback)) - output, err := kadm.TokenCreate(kubeadm.TokenCreateParams{}) - if err != nil { - t.Errorf("unexpected error: wanted nil") - } - if output != joinCommand { - t.Errorf("unexpected output: got '%v', want '%v'", output, joinCommand) - } -} - -func TestNonZeroExitCodeResultsInError(t *testing.T) { - kadm := kubeadm.NewWithRunner(testcmdrunner.NewOrDie(t, errorCallback)) - output, err := kadm.TokenCreate(kubeadm.TokenCreateParams{}) - if err == nil { - t.Errorf("expected error: got nil") - } - if output != errorMessage { - t.Errorf("unexpected output: got '%v', want '%v'", output, errorMessage) - } -} - -func echoCallback(cmd string, args ...string) (string, error) { - fullCmd := append([]string{cmd}, args...) - return strings.Join(fullCmd, " "), nil -} - -func tokenCallback(cmd string, args ...string) (string, error) { - return joinCommand, nil -} - -func errorCallback(cmd string, args ...string) (string, error) { - return errorMessage, errorExitCode -} - -func toDuration(hour int, minute int, second int) time.Duration { - return time.Duration(hour)*time.Hour + time.Duration(minute)*time.Minute + time.Duration(second)*time.Second -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/BUILD.bazel deleted file mode 100644 index 92a54a8726..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["clusteractuator.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/clusteractuator.go b/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/clusteractuator.go deleted file mode 100644 index 249c5f0702..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/cluster/clusteractuator.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" -) - -// Actuator is responsible for performing cluster reconciliation -type Actuator struct { - clusterV1alpha1 clusterv1alpha1.ClusterV1alpha1Interface - recorder record.EventRecorder -} - -// NewClusterActuator creates a new cluster actuator -func NewClusterActuator(clusterV1alpha1 clusterv1alpha1.ClusterV1alpha1Interface, recorder record.EventRecorder) (*Actuator, error) { - return &Actuator{ - clusterV1alpha1: clusterV1alpha1, - recorder: recorder, - }, nil -} - -// Reconcile will create or update the cluster -func (a *Actuator) Reconcile(cluster *clusterv1.Cluster) error { - a.recorder.Event(cluster, corev1.EventTypeWarning, "cluster-api", "clusteractuator Reconcile invoked") - return nil -} - -// Delete deletes a cluster and is invoked by the Cluster Controller -func (a *Actuator) Delete(cluster *clusterv1.Cluster) error { - a.recorder.Event(cluster, corev1.EventTypeWarning, "cluster-api", "clusteractuator Delete invoked") - return nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/BUILD.bazel deleted file mode 100644 index 3246479ed0..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["machineactuator.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/machineactuator.go b/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/machineactuator.go deleted file mode 100644 index 0c1585736d..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/actuators/machine/machineactuator.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machine - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" -) - -// Actuator is responsible for performing machine reconciliation. -type Actuator struct { - client clusterv1alpha1.ClusterV1alpha1Interface - recorder record.EventRecorder -} - -// NewMachineActuator return a machine actuator -func NewMachineActuator(clusterV1alpha1 clusterv1alpha1.ClusterV1alpha1Interface, recorder record.EventRecorder) (*Actuator, error) { - return &Actuator{ - client: clusterV1alpha1, - recorder: recorder, - }, nil -} - -// Create creates a machine -func (a *Actuator) Create(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - a.recorder.Event(machine, corev1.EventTypeWarning, "cluster-api", "machineactuator Create invoked") - return nil -} - -// Delete deletes a machine -func (a *Actuator) Delete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - a.recorder.Event(machine, corev1.EventTypeWarning, "cluster-api", "machineactuator Delete invoked") - return nil -} - -// Update updates a machine -func (a *Actuator) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - a.recorder.Event(machine, corev1.EventTypeWarning, "cluster-api", "machineactuator Update invoked") - return nil -} - -// Exists test for the existence of a machine -func (a *Actuator) Exists(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) { - a.recorder.Event(machine, corev1.EventTypeWarning, "cluster-api", "machineactuator Exists invoked") - return false, nil -} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/container/Dockerfile b/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/container/Dockerfile index 1540993ba9..f929907b34 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/container/Dockerfile +++ b/vendor/sigs.k8s.io/cluster-api/pkg/provider/example/container/Dockerfile @@ -13,16 +13,23 @@ # limitations under the License. 
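
Aside, not part of the patch: the example actuator above spells out the method set a v1alpha1 machine actuator had to provide. A do-nothing provider-side skeleton with the same signatures, names illustrative; a real provider fills in the bodies with calls to its infrastructure API:

package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
)

// Actuator mirrors the method set of the example actuator deleted above.
type Actuator struct{}

func (a *Actuator) Create(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
	return nil // create the backing instance here
}

func (a *Actuator) Delete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
	return nil // tear the instance down here
}

func (a *Actuator) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
	return nil // reconcile drift between Machine spec and instance here
}

func (a *Actuator) Exists(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) {
	return false, nil // report whether the instance already exists
}
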
# Build the manager binary -FROM golang:1.12.5 as builder +FROM golang:1.12.6 as builder + +ARG ARCH # Copy in the go src WORKDIR $GOPATH/src/sigs.k8s.io/cluster-api COPY pkg/ pkg/ COPY vendor/ vendor/ COPY cmd/ cmd/ +COPY api/ api/ +COPY controllers/ controllers/ +COPY vendor/ vendor/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -ldflags '-extldflags "-static"' -o ./cmd/example-provider/manager sigs.k8s.io/cluster-api/cmd/example-provider +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \ + go build -a -ldflags '-extldflags "-static"' \ + -o ./cmd/example-provider/manager sigs.k8s.io/cluster-api/cmd/example-provider # Copy the controller-manager into a thin image FROM gcr.io/distroless/static:latest diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/BUILD.bazel deleted file mode 100644 index 1bf2070022..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["test_cmd_runner.go"], - importpath = "sigs.k8s.io/cluster-api/pkg/testcmdrunner", - visibility = ["//visibility:public"], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/test_cmd_runner.go b/vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/test_cmd_runner.go deleted file mode 100644 index 239b49214c..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/testcmdrunner/test_cmd_runner.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testcmdrunner - -import ( - "testing" -) - -// TestRunner mocks out command execution. -type TestRunner struct { - callback func(cmd string, args ...string) (string, error) -} - -// New builds a TestRunner to mock out command execution. -func New(callback func(cmd string, args ...string) (string, error)) (*TestRunner, error) { - return &TestRunner{ - callback: callback, - }, nil -} - -// NewOrDie builds a TestRunner or calls t.Fatal on error -func NewOrDie(t *testing.T, callback func(cmd string, args ...string) (string, error)) *TestRunner { - runner, err := New(callback) - if err != nil { - t.Fatal(err) - } - return runner -} - -func (runner *TestRunner) CombinedOutput(cmd string, args ...string) (string, error) { - return runner.callback(cmd, args...) 
-} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel index 5d95383326..243353b8d4 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/BUILD.bazel @@ -9,16 +9,22 @@ go_library( importpath = "sigs.k8s.io/cluster-api/pkg/util", visibility = ["//visibility:public"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", + "//vendor/github.com/pkg/errors:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", ], ) @@ -27,8 +33,15 @@ go_test( srcs = ["util_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client/fake:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/handler:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/reconcile:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go b/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go index 929a35c6e7..5fb4516010 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/util.go @@ -17,26 +17,35 @@ limitations under the License. 
package util import ( + "bufio" + "bytes" "context" - "errors" + "encoding/json" "fmt" "io" "math/rand" "os" "os/exec" "os/user" + "reflect" "strings" "time" + "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog" - clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const ( @@ -48,9 +57,15 @@ const ( ) var ( - rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + ErrNoCluster = fmt.Errorf("no %q label present", clusterv1.MachineClusterLabelName) + ErrUnstructuredFieldNotFound = fmt.Errorf("field not found") ) +func init() { + clusterv1.AddToScheme(scheme.Scheme) +} + // RandomToken returns a random token. func RandomToken() string { return fmt.Sprintf("%s.%s", RandomString(6), RandomString(16)) @@ -65,22 +80,21 @@ func RandomString(n int) string { return string(result) } -// GetControlPlaneMachine returns a control plane machine from input. -// Deprecated: use GetControlPlaneMachines. -func GetControlPlaneMachine(machines []*clusterv1.Machine) *clusterv1.Machine { +// GetControlPlaneMachines returns a slice containing control plane machines. +func GetControlPlaneMachines(machines []*clusterv1.Machine) (res []*clusterv1.Machine) { for _, machine := range machines { if IsControlPlaneMachine(machine) { - return machine + res = append(res, machine) } } - return nil + return } -// GetControlPlaneMachines returns a slice containing control plane machines. -func GetControlPlaneMachines(machines []*clusterv1.Machine) (res []*clusterv1.Machine) { - for _, machine := range machines { - if IsControlPlaneMachine(machine) { - res = append(res, machine) +// GetControlPlaneMachinesFromList returns a slice containing control plane machines. +func GetControlPlaneMachinesFromList(machineList *clusterv1.MachineList) (res []*clusterv1.Machine) { + for _, machine := range machineList.Items { + if IsControlPlaneMachine(&machine) { + res = append(res, &machine) } } return @@ -134,7 +148,7 @@ func GetMachineIfExists(c client.Client, namespace, name string) (*clusterv1.Mac // IsControlPlaneMachine checks machine is a control plane node. func IsControlPlaneMachine(machine *clusterv1.Machine) bool { - return machine.Spec.Versions.ControlPlane != "" + return machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabelName] != "" } // IsNodeReady returns true if a node is ready. @@ -148,6 +162,165 @@ func IsNodeReady(node *v1.Node) bool { return false } +// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. +func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + if obj.Labels[clusterv1.MachineClusterLabelName] == "" { + return nil, errors.WithStack(ErrNoCluster) + } + return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.MachineClusterLabelName]) +} + +// GetOwnerCluster returns the Cluster object owning the current resource. 
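
Aside, not part of the patch: note that IsControlPlaneMachine above now keys off a label rather than Spec.Versions, so a v1alpha2 Machine is marked as control plane purely through metadata (any non-empty value satisfies the check). A small illustration using only the label names and helpers referenced above; names and values are illustrative:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/cluster-api/pkg/util"
)

// controlPlaneMachine builds a Machine that GetControlPlaneMachines and
// IsControlPlaneMachine above will treat as part of the control plane.
func controlPlaneMachine(clusterName string) *clusterv1.Machine {
	return &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterName + "-controlplane-0",
			Namespace: "default",
			Labels: map[string]string{
				clusterv1.MachineClusterLabelName:      clusterName,
				clusterv1.MachineControlPlaneLabelName: "true",
			},
		},
	}
}

// isControlPlane returns true for the machine built above.
func isControlPlane() bool {
	return util.IsControlPlaneMachine(controlPlaneMachine("my-cluster"))
}
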
+func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + for _, ref := range obj.OwnerReferences { + if ref.Kind == "Cluster" && ref.APIVersion == clusterv1.GroupVersion.String() { + return GetClusterByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetClusterByName finds and return a Cluster object using the specified params. +func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Cluster, error) { + cluster := &clusterv1.Cluster{} + key := client.ObjectKey{ + Namespace: namespace, + Name: name, + } + + if err := c.Get(ctx, key, cluster); err != nil { + return nil, err + } + + return cluster, nil +} + +// ClusterToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for +// Cluster events and returns reconciliation requests for an infrastructure provider object. +func ClusterToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.ToRequestsFunc { + return func(o handler.MapObject) []reconcile.Request { + c, ok := o.Object.(*clusterv1.Cluster) + if !ok { + return nil + } + + // Return early if the InfrastructureRef is nil. + if c.Spec.InfrastructureRef == nil { + return nil + } + + // Return early if the GroupVersionKind doesn't match what we expect. + infraGVK := c.Spec.InfrastructureRef.GroupVersionKind() + if gvk != infraGVK { + return nil + } + + return []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: c.Namespace, + Name: c.Spec.InfrastructureRef.Name, + }, + }, + } + } +} + +// GetOwnerMachine returns the Machine object owning the current resource. +func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Machine, error) { + for _, ref := range obj.OwnerReferences { + if ref.Kind == "Machine" && ref.APIVersion == clusterv1.GroupVersion.String() { + return GetMachineByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetMachineByName finds and return a Machine object using the specified params. +func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) { + m := &clusterv1.Machine{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +} + +// MachineToInfrastructureMapFunc returns a handler.ToRequestsFunc that watches for +// Machine events and returns reconciliation requests for an infrastructure provider object. +func MachineToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.ToRequestsFunc { + return func(o handler.MapObject) []reconcile.Request { + m, ok := o.Object.(*clusterv1.Machine) + if !ok { + return nil + } + + // Return early if the GroupVersionKind doesn't match what we expect. + infraGVK := m.Spec.InfrastructureRef.GroupVersionKind() + if gvk != infraGVK { + return nil + } + + return []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: m.Namespace, + Name: m.Spec.InfrastructureRef.Name, + }, + }, + } + } +} + +// HasOwnerRef returns true if the OwnerReference is already in the slice. +func HasOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) bool { + for _, r := range ownerReferences { + if reflect.DeepEqual(ref, r) { + return true + } + } + return false +} + +// EnsureOwnerRef makes sure the slice contains the OwnerReference. 
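ClusterToInfrastructureMapFunc and MachineToInfrastructureMapFunc above exist so that an infrastructure provider can watch Cluster API objects and get reconcile requests for the provider object named in Spec.InfrastructureRef. A hedged sketch of wiring the Machine variant into a controller-runtime watch, assuming the controller-runtime 0.2.x controller/handler/source APIs vendored by this patch; the controller name and the infrastructure GroupVersionKind below are placeholders, not values taken from this repository:

package main

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/cluster-api/pkg/util"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// addMachineWatch registers a controller that reconciles a provider's
// infrastructure machine object whenever its owning Cluster API Machine changes.
func addMachineWatch(mgr manager.Manager, rec reconcile.Reconciler) error {
	c, err := controller.New("example-infra-machine", mgr, controller.Options{Reconciler: rec})
	if err != nil {
		return err
	}
	// Hypothetical GVK of the provider's infrastructure machine kind.
	gvk := schema.GroupVersionKind{
		Group:   "infrastructure.cluster.x-k8s.io",
		Version: "v1alpha2",
		Kind:    "ExampleMachine",
	}
	// Machine events are translated into requests for the object referenced by
	// Spec.InfrastructureRef, but only when its GVK matches the one given here.
	return c.Watch(
		&source.Kind{Type: &clusterv1.Machine{}},
		&handler.EnqueueRequestsFromMapFunc{ToRequests: util.MachineToInfrastructureMapFunc(gvk)},
	)
}

The Cluster variant is wired the same way, with a source.Kind watching clusterv1.Cluster and ClusterToInfrastructureMapFunc supplying the mapping.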
+func EnsureOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) []metav1.OwnerReference { + if !HasOwnerRef(ownerReferences, ref) { + return append(ownerReferences, ref) + } + return ownerReferences +} + +// PointsTo returns true if any of the owner references point to the given target +func PointsTo(refs []metav1.OwnerReference, target *metav1.ObjectMeta) bool { + for _, ref := range refs { + if ref.UID == target.UID { + return true + } + } + + return false +} + +// UnstructuredUnmarshalField is a wrapper around json and unstructured objects to decode and copy a specific field +// value into an object. +func UnstructuredUnmarshalField(obj *unstructured.Unstructured, v interface{}, fields ...string) error { + value, found, err := unstructured.NestedFieldNoCopy(obj.Object, fields...) + if err != nil { + return errors.Wrapf(err, "failed to retrieve field %q from %q", strings.Join(fields, "."), obj.GroupVersionKind()) + } + if !found || value == nil { + return ErrUnstructuredFieldNotFound + } + valueBytes, err := json.Marshal(value) + if err != nil { + return errors.Wrapf(err, "failed to json-encode field %q value from %q", strings.Join(fields, "."), obj.GroupVersionKind()) + } + if err := json.Unmarshal(valueBytes, v); err != nil { + return errors.Wrapf(err, "failed to json-decode field %q value from %q", strings.Join(fields, "."), obj.GroupVersionKind()) + } + return nil +} + // Copy deep copies a Machine object. func Copy(m *clusterv1.Machine) *clusterv1.Machine { ret := &clusterv1.Machine{} @@ -208,77 +381,86 @@ func ParseClusterYaml(file string) (*clusterv1.Cluster, error) { return nil, err } - defer reader.Close() + decoder := NewYAMLDecoder(reader) - decoder := yaml.NewYAMLOrJSONDecoder(reader, 32) + for { + var cluster clusterv1.Cluster + _, gvk, err := decoder.Decode(nil, &cluster) + if err == io.EOF { + break + } - bytes, err := decodeClusterV1Kinds(decoder, "Cluster") - if err != nil { - return nil, err - } + if runtime.IsNotRegisteredError(err) { + continue + } - var cluster clusterv1.Cluster - if err := json.Unmarshal(bytes[0], &cluster); err != nil { - return nil, err + if err != nil { + return nil, err + } + + if gvk.Kind == "Cluster" { + return &cluster, nil + } } - return &cluster, nil + return nil, errors.New("Did not find a cluster") + } // ParseMachinesYaml extracts machine objects from a file. func ParseMachinesYaml(file string) ([]*clusterv1.Machine, error) { reader, err := os.Open(file) + if err != nil { return nil, err } - defer reader.Close() - - decoder := yaml.NewYAMLOrJSONDecoder(reader, 32) - var ( - bytes [][]byte - machineList clusterv1.MachineList - machines = []*clusterv1.Machine{} + machines []*clusterv1.Machine ) - // TODO: use the universal decoder instead of doing this. 
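GetOwnerMachine, GetOwnerCluster and GetClusterFromMetadata above are the lookups a provider's Reconcile typically performs before touching any cloud resources: resolve the owning Machine through its owner reference, then resolve the Cluster through the cluster label on that Machine. A rough sketch of that sequence, assuming only the helpers shown in this hunk; resolveOwners itself is a hypothetical wrapper, not part of the patch:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/cluster-api/pkg/util"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resolveOwners walks from an infrastructure object's metadata to its owning
// Machine and Cluster using the helpers added in this hunk.
func resolveOwners(ctx context.Context, c client.Client, meta metav1.ObjectMeta) (*clusterv1.Machine, *clusterv1.Cluster, error) {
	machine, err := util.GetOwnerMachine(ctx, c, meta)
	if err != nil {
		return nil, nil, err
	}
	if machine == nil {
		// The Machine controller has not set the owner reference yet;
		// callers usually return early and wait for the next event.
		return nil, nil, nil
	}
	cluster, err := util.GetClusterFromMetadata(ctx, c, machine.ObjectMeta)
	if err != nil {
		// The error wraps util.ErrNoCluster when the cluster label is missing.
		return machine, nil, err
	}
	return machine, cluster, nil
}

Returning a nil Machine without an error mirrors the common pattern of requeueing until the owner reference has been written, rather than treating the gap as a failure.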
- if bytes, err = decodeClusterV1Kinds(decoder, "MachineList"); err != nil { - if isMissingKind(err) { - err = errors.New(MachineListFormatDeprecationMessage) + decoder := NewYAMLDecoder(reader) + defer decoder.Close() + + for { + obj, gvk, err := decoder.Decode(nil, nil) + + if err == io.EOF { + break } - return nil, err - } - // TODO: this is O(n^2) and must be optimized - for _, ml := range bytes { - if err := json.Unmarshal(ml, &machineList); err != nil { - return nil, err + if runtime.IsNotRegisteredError(err) { + continue } - for i := range machineList.Items { - machine := &machineList.Items[i] - if machine.APIVersion == "" || machine.Kind == "" { - return nil, errors.New(MachineListFormatDeprecationMessage) - } - machines = append(machines, machine) + + if err != nil { + return nil, err } - } - // reset reader to search for discrete Machine definitions - if _, err := reader.Seek(0, 0); err != nil { - return nil, err - } + switch gvk.Kind { - if bytes, err = decodeClusterV1Kinds(decoder, "Machine"); err != nil { - return nil, err - } + case "MachineList": + ml, ok := obj.(*clusterv1.MachineList) + if !ok { + return nil, fmt.Errorf("Expected MachineList, got %t", obj) + } - for _, m := range bytes { - machine := &clusterv1.Machine{} - if err := json.Unmarshal(m, machine); err != nil { - return nil, err + for i := range ml.Items { + machine := &ml.Items[i] + if machine.APIVersion == "" || machine.Kind == "" { + return nil, errors.New(MachineListFormatDeprecationMessage) + } + machines = append(machines, machine) + } + case "Machine": + m, ok := obj.(*clusterv1.Machine) + if !ok { + return nil, fmt.Errorf("Expected Machine, got %t", obj) + } + + machines = append(machines, m) } - machines = append(machines, machine) + } return machines, nil @@ -290,44 +472,42 @@ func isMissingKind(err error) bool { return strings.Contains(err.Error(), "Object 'Kind' is missing in") } -// decodeClusterV1Kinds returns a slice of objects matching the clusterv1 kind. -func decodeClusterV1Kinds(decoder *yaml.YAMLOrJSONDecoder, kind string) ([][]byte, error) { - outs := [][]byte{} +type yamlDecoder struct { + reader *yaml.YAMLReader + decoder runtime.Decoder + close func() error +} +func (d *yamlDecoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { for { - var out unstructured.Unstructured - - if err := decoder.Decode(&out); err == io.EOF { - break - } else if err != nil { - return nil, err + doc, err := d.reader.Read() + if err != nil { + return nil, nil, err } - if out.GetKind() == kind && out.GetAPIVersion() == clusterv1.SchemeGroupVersion.String() { - marshaled, err := out.MarshalJSON() - if err != nil { - return outs, err - } - outs = append(outs, marshaled) + // Skip over empty documents, i.e. 
a leading `---` + if len(bytes.TrimSpace(doc)) == 0 { + continue } + + return d.decoder.Decode(doc, defaults, into) } - return outs, nil +} +func (d *yamlDecoder) Close() error { + return d.close() } -// Returns true if any of the owner references point to the given target -func PointsTo(refs []metav1.OwnerReference, target *metav1.ObjectMeta) bool { - for _, ref := range refs { - if ref.UID == target.UID { - return true - } +func NewYAMLDecoder(r io.ReadCloser) streaming.Decoder { + return &yamlDecoder{ + reader: yaml.NewYAMLReader(bufio.NewReader(r)), + decoder: scheme.Codecs.UniversalDeserializer(), + close: r.Close, } - - return false } -// HasOwner checks if any of the references in the past list match the given apiVersion and one of the given kinds +// HasOwner checks if any of the references in the passed list match the given apiVersion and one of the given kinds func HasOwner(refList []metav1.OwnerReference, apiVersion string, kinds []string) bool { kMap := make(map[string]bool) for _, kind := range kinds { diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go b/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go index f5e53f34d1..7d7f21d8bb 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/util/util_test.go @@ -17,17 +17,27 @@ limitations under the License. package util import ( + "context" "io/ioutil" "os" + "reflect" "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const validCluster = ` -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Cluster metadata: name: cluster1 @@ -35,42 +45,42 @@ spec:` const validMachines1 = ` --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine1 --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine2` const validUnified1 = ` -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Cluster metadata: name: cluster1 --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: MachineList items: -- apiVersion: "cluster.k8s.io/v1alpha1" +- apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine1` const validUnified2 = ` -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Cluster metadata: name: cluster1 --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine1 --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine2` @@ -87,24 +97,24 @@ metadata: name: cluster-api-shared-configuration namespace: cluster-api-test --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Cluster metadata: name: cluster1 --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine1 --- -apiVersion: 
"cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine2` const invalidMachines1 = ` items: -- apiVersion: "cluster.k8s.io/v1alpha1" +- apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Machine metadata: name: machine1 @@ -131,12 +141,12 @@ metadata: name: cluster-api-shared-configuration namespace: cluster-api-test --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Cluster metadata: name: cluster1 --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: MachineList items: - metadata: @@ -158,7 +168,7 @@ metadata: name: cluster-api-shared-configuration namespace: cluster-api-test --- -apiVersion: "cluster.k8s.io/v1alpha1" +apiVersion: "cluster.x-k8s.io/v1alpha2" kind: Cluster metadata: name: cluster1 @@ -204,16 +214,6 @@ func TestParseClusterYaml(t *testing.T) { contents: validUnified3, expectedName: "cluster1", }, - { - name: "valid unified for cluster with invalid machinelist (only with type info) and a configmap", - contents: invalidUnified1, - expectedName: "cluster1", - }, - { - name: "valid unified for cluster with invalid machinelist (old top-level items list) and a configmap", - contents: invalidUnified2, - expectErr: true, - }, { name: "gibberish in file", contents: `blah ` + validCluster + ` blah`, @@ -230,7 +230,7 @@ func TestParseClusterYaml(t *testing.T) { c, err := ParseClusterYaml(file) if (testcase.expectErr && err == nil) || (!testcase.expectErr && err != nil) { - t.Fatalf("Unexpected returned error. Got: %v, Want Err: %v", err, testcase.expectErr) + t.Fatalf("Unexpected returned error. Got: %+v, Want Err: %v", err, testcase.expectErr) } if err != nil { return @@ -339,61 +339,157 @@ func createTempFile(contents string) (string, error) { return f.Name(), nil } -func TestPointsTo(t *testing.T) { - targetID := "fri3ndsh1p" - - meta := metav1.ObjectMeta{ - UID: types.UID(targetID), - } - - tests := []struct { - name string - refIDs []string - expected bool +func TestMachineToInfrastructureMapFunc(t *testing.T) { + var testcases = []struct { + name string + input schema.GroupVersionKind + request handler.MapObject + output []reconcile.Request }{ { - name: "empty owner list", - }, - { - name: "single wrong owner ref", - refIDs: []string{"m4g1c"}, + name: "should reconcile infra-1", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha2", + Kind: "TestMachine", + }, + request: handler.MapObject{ + Object: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test-1", + }, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "foo.cluster.x-k8s.io/v1alpha2", + Kind: "TestMachine", + Name: "infra-1", + }, + }, + }, + }, + output: []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: "default", + Name: "infra-1", + }, + }, + }, }, { - name: "single right owner ref", - refIDs: []string{targetID}, - expected: true, + name: "should return no matching reconcile requests", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha2", + Kind: "TestMachine", + }, + request: handler.MapObject{ + Object: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test-1", + }, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "bar.cluster.x-k8s.io/v1alpha2", + Kind: "TestMachine", + Name: "bar-1", + }, + }, + }, + }, + output: nil, }, + } + + for _, tc := range 
testcases { + t.Run(tc.name, func(t *testing.T) { + fn := MachineToInfrastructureMapFunc(tc.input) + out := fn(tc.request) + if !reflect.DeepEqual(out, tc.output) { + t.Fatalf("Unexpected output. Got: %v, Want: %v", out, tc.output) + } + }) + } +} + +func TestClusterToInfrastructureMapFunc(t *testing.T) { + var testcases = []struct { + name string + input schema.GroupVersionKind + request handler.MapObject + output []reconcile.Request + }{ { - name: "multiple wrong refs", - refIDs: []string{"m4g1c", "h4rm0ny"}, + name: "should reconcile infra-1", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha2", + Kind: "TestCluster", + }, + request: handler.MapObject{ + Object: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test-1", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "foo.cluster.x-k8s.io/v1alpha2", + Kind: "TestCluster", + Name: "infra-1", + }, + }, + }, + }, + output: []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: "default", + Name: "infra-1", + }, + }, + }, }, { - name: "multiple refs one right", - refIDs: []string{"m4g1c", targetID}, - expected: true, + name: "should return no matching reconcile requests", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha2", + Kind: "TestCluster", + }, + request: handler.MapObject{ + Object: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test-1", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "bar.cluster.x-k8s.io/v1alpha2", + Kind: "TestCluster", + Name: "bar-1", + }, + }, + }, + }, + output: nil, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pointer := &metav1.ObjectMeta{} - - for _, ref := range test.refIDs { - pointer.OwnerReferences = append(pointer.OwnerReferences, metav1.OwnerReference{ - UID: types.UID(ref), - }) - } - - result := PointsTo(pointer.OwnerReferences, &meta) - if result != test.expected { - t.Errorf("expected %v, got %v", test.expected, result) + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + fn := ClusterToInfrastructureMapFunc(tc.input) + out := fn(tc.request) + if !reflect.DeepEqual(out, tc.output) { + t.Fatalf("Unexpected output. 
Got: %v, Want: %v", out, tc.output) } }) } } func TestHasOwner(t *testing.T) { - tests := []struct { name string refList []metav1.OwnerReference @@ -407,12 +503,11 @@ func TestHasOwner(t *testing.T) { refList: []metav1.OwnerReference{ { Kind: "Cluster", - APIVersion: v1alpha1.SchemeGroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, expected: true, }, - { name: "owned by something else", refList: []metav1.OwnerReference{ @@ -431,7 +526,7 @@ func TestHasOwner(t *testing.T) { refList: []metav1.OwnerReference{ { Kind: "MachineDeployment", - APIVersion: v1alpha1.SchemeGroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, expected: true, @@ -446,12 +541,11 @@ func TestHasOwner(t *testing.T) { }, }, { - name: "right apiversion, wrong kind", refList: []metav1.OwnerReference{ { Kind: "Machine", - APIVersion: v1alpha1.SchemeGroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, @@ -461,7 +555,7 @@ func TestHasOwner(t *testing.T) { t.Run(test.name, func(t *testing.T) { result := HasOwner( test.refList, - v1alpha1.SchemeGroupVersion.String(), + clusterv1.GroupVersion.String(), []string{"MachineDeployment", "Cluster"}, ) if test.expected != result { @@ -470,3 +564,124 @@ func TestHasOwner(t *testing.T) { }) } } + +func TestPointsTo(t *testing.T) { + targetID := "fri3ndsh1p" + + meta := metav1.ObjectMeta{ + UID: types.UID(targetID), + } + + tests := []struct { + name string + refIDs []string + expected bool + }{ + { + name: "empty owner list", + }, + { + name: "single wrong owner ref", + refIDs: []string{"m4g1c"}, + }, + { + name: "single right owner ref", + refIDs: []string{targetID}, + expected: true, + }, + { + name: "multiple wrong refs", + refIDs: []string{"m4g1c", "h4rm0ny"}, + }, + { + name: "multiple refs one right", + refIDs: []string{"m4g1c", targetID}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + pointer := &metav1.ObjectMeta{} + + for _, ref := range test.refIDs { + pointer.OwnerReferences = append(pointer.OwnerReferences, metav1.OwnerReference{ + UID: types.UID(ref), + }) + } + + result := PointsTo(pointer.OwnerReferences, &meta) + if result != test.expected { + t.Errorf("expected %v, got %v", test.expected, result) + } + }) + } +} + +func TestGetOwnerClusterSuccessByName(t *testing.T) { + scheme := runtime.NewScheme() + if err := clusterv1.AddToScheme(scheme); err != nil { + t.Fatal("failed to register cluster api objects to scheme") + } + + myCluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "my-ns", + }, + } + + c := fake.NewFakeClientWithScheme(scheme, myCluster) + objm := metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + Name: "my-cluster", + }, + }, + Namespace: "my-ns", + Name: "my-resource-owned-by-cluster", + } + cluster, err := GetOwnerCluster(context.TODO(), c, objm) + if err != nil { + t.Fatalf("did not expect an error but found one: %v", err) + } + if cluster == nil { + t.Fatal("expected a cluster but got nil") + } +} + +func TestGetOwnerMachineSuccessByName(t *testing.T) { + scheme := runtime.NewScheme() + if err := clusterv1.AddToScheme(scheme); err != nil { + t.Fatal("failed to register cluster api objects to scheme") + } + + myMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine", + Namespace: "my-ns", + }, + } + + c := fake.NewFakeClientWithScheme(scheme, myMachine) + objm := 
metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Name: "my-machine", + }, + }, + Namespace: "my-ns", + Name: "my-resource-owned-by-machine", + } + machine, err := GetOwnerMachine(context.TODO(), c, objm) + if err != nil { + t.Fatalf("did not expect an error but found one: %v", err) + } + if machine == nil { + t.Fatal("expected a machine but got nil") + } +} diff --git a/vendor/sigs.k8s.io/cluster-api/sample/cluster.yaml b/vendor/sigs.k8s.io/cluster-api/sample/cluster.yaml deleted file mode 100644 index 92111ec617..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/sample/cluster.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: cluster.k8s.io/v1alpha1 -kind: Cluster -metadata: - name: cluster-example -spec: diff --git a/vendor/sigs.k8s.io/cluster-api/sample/machine.yaml b/vendor/sigs.k8s.io/cluster-api/sample/machine.yaml deleted file mode 100644 index d7ab80715b..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/sample/machine.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: cluster.k8s.io/v1alpha1 -kind: Machine -metadata: - name: machine-example -spec: diff --git a/vendor/sigs.k8s.io/cluster-api/sample/machineclass.yaml b/vendor/sigs.k8s.io/cluster-api/sample/machineclass.yaml deleted file mode 100644 index 8ca63eff03..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/sample/machineclass.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Sample machine-class object -apiVersion: "cluster.k8s.io/v1alpha1" -kind: MachineClass -metadata: - name: small - namespace: foo -providerSpec: - apiVersion: "gceproviderconfig/v1alpha1" - kind: "GCEProviderConfig" - project: "$GCLOUD_PROJECT" - zone: "us-central1-f" - machineType: "n1-standard-2" - image: "projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts" - ---- - -# Sample machine object -apiVersion: cluster.k8s.io/v1alpha1 -kind: Machine -metadata: - name: test-machine - namespace: foo - labels: - test-label: test-label -spec: - providerSpec: - valueFrom: - machineClass: - provider: gcp - name: small - namespace: foo diff --git a/vendor/sigs.k8s.io/cluster-api/sample/machinedeployment.yaml b/vendor/sigs.k8s.io/cluster-api/sample/machinedeployment.yaml deleted file mode 100644 index a00bd6ee32..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/sample/machinedeployment.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: "cluster.k8s.io/v1alpha1" -kind: MachineDeployment -metadata: - name: sample-machinedeployment -spec: - replicas: 3 - selector: - matchLabels: - foo: bar - template: - metadata: - labels: - foo: bar - spec: - providerSpec: - value: - apiVersion: "gceproviderconfig/v1alpha1" - kind: "GCEProviderConfig" - roles: - - Node - project: "${GCLOUD_PROJECT}" - zone: "us-central1-c" - machineType: "n1-standard-2" - os: "ubuntu-1604-lts" - versions: - kubelet: 1.9.4 diff --git a/vendor/sigs.k8s.io/cluster-api/sample/machineset.yaml b/vendor/sigs.k8s.io/cluster-api/sample/machineset.yaml deleted file mode 100644 index a08b92cf64..0000000000 --- a/vendor/sigs.k8s.io/cluster-api/sample/machineset.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: "cluster.k8s.io/v1alpha1" -kind: MachineSet -metadata: - name: my-first-machineset -spec: - replicas: 3 - selector: - matchLabels: - foo: bar - template: - metadata: - labels: - foo: bar - spec: - providerSpec: - value: - apiVersion: "gceproviderconfig/v1alpha1" - kind: "GCEMachineProviderConfig" - roles: - - Node - zone: "us-central1-f" - machineType: "n1-standard-1" - os: "ubuntu-1604-lts" - versions: - kubelet: 1.9.4 diff --git 
a/vendor/sigs.k8s.io/cluster-api/scripts/ci-integration.sh b/vendor/sigs.k8s.io/cluster-api/scripts/ci-integration.sh index da24e52542..c317e54250 100644 --- a/vendor/sigs.k8s.io/cluster-api/scripts/ci-integration.sh +++ b/vendor/sigs.k8s.io/cluster-api/scripts/ci-integration.sh @@ -19,9 +19,8 @@ set -o nounset set -o pipefail MAKE="make" -KIND_VERSION="0.2.1" -KUSTOMIZE_VERSION="2.0.3" -KUBECTL_VERSION="v1.13.2" +KIND_VERSION="v0.4.0" +KUBECTL_VERSION="v1.15.0" CRD_YAML="crd.yaml" BOOTSTRAP_CLUSTER_NAME="clusterapi-bootstrap" CONTROLLER_REPO="controller-ci" # use arbitrary repo name since we don't need to publish it @@ -31,12 +30,6 @@ INTEGRATION_TEST_DIR="./test/integration" GOOS=$(go env GOOS) GOARCH=$(go env GOARCH) -install_kustomize() { - wget "https://github.com/kubernetes-sigs/kustomize/releases/download/v${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_${GOOS}_${GOARCH}" \ - --no-verbose -O /usr/local/bin/kustomize - chmod +x /usr/local/bin/kustomize -} - install_kind() { wget "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${GOOS}-${GOARCH}" \ --no-verbose -O /usr/local/bin/kind @@ -51,18 +44,18 @@ install_kubectl() { build_containers() { VERSION="$(git describe --exact-match 2> /dev/null || git describe --match="$(git rev-parse --short=8 HEAD)" --always --dirty --abbrev=8)" - export CONTROLLER_IMG="${CONTROLLER_REPO}:${VERSION}" - export EXAMPLE_PROVIDER_IMG="${EXAMPLE_PROVIDER_REPO}:${VERSION}" + export CONTROLLER_IMG="${CONTROLLER_REPO}" + export EXAMPLE_PROVIDER_IMG="${EXAMPLE_PROVIDER_REPO}" - "${MAKE}" docker-build - "${MAKE}" docker-build-ci + "${MAKE}" docker-build TAG="${VERSION}" ARCH="${GOARCH}" + "${MAKE}" docker-build-ci TAG="${VERSION}" ARCH="${GOARCH}" } prepare_crd_yaml() { CLUSTER_API_CONFIG_PATH="./config" - kustomize build "${CLUSTER_API_CONFIG_PATH}/default/" > "${CRD_YAML}" + kubectl kustomize "${CLUSTER_API_CONFIG_PATH}/default/" > "${CRD_YAML}" echo "---" >> "${CRD_YAML}" - kustomize build "${CLUSTER_API_CONFIG_PATH}/ci/" >> "${CRD_YAML}" + kubectl kustomize "${CLUSTER_API_CONFIG_PATH}/ci/" >> "${CRD_YAML}" } create_bootstrap() { @@ -70,8 +63,8 @@ create_bootstrap() { KUBECONFIG="$(kind get kubeconfig-path --name="${BOOTSTRAP_CLUSTER_NAME}")" export KUBECONFIG - kind load docker-image "${CONTROLLER_IMG}" --name "${BOOTSTRAP_CLUSTER_NAME}" - kind load docker-image "${EXAMPLE_PROVIDER_IMG}" --name "${BOOTSTRAP_CLUSTER_NAME}" + kind load docker-image "${CONTROLLER_IMG}-${GOARCH}:${VERSION}" --name "${BOOTSTRAP_CLUSTER_NAME}" + kind load docker-image "${EXAMPLE_PROVIDER_IMG}-${GOARCH}:${VERSION}" --name "${BOOTSTRAP_CLUSTER_NAME}" } delete_bootstrap() { @@ -96,7 +89,7 @@ wait_pod_running() { } ensure_docker_in_docker() { - if [[ -z "${PROW_JOB_ID:-}" ]] ; then + if [[ -z "${PROW_JOB_ID}" ]] ; then # start docker service in setup other than Prow service docker start fi @@ -107,11 +100,9 @@ main() { ensure_docker_in_docker build_containers - install_kustomize - prepare_crd_yaml - install_kubectl install_kind + prepare_crd_yaml create_bootstrap kubectl create -f "${CRD_YAML}" diff --git a/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh b/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh index 644f03f554..06809562b7 100644 --- a/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh +++ b/vendor/sigs.k8s.io/cluster-api/scripts/ci-is-vendor-in-sync.sh @@ -22,4 +22,6 @@ REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
source "${REPO_ROOT}/hack/ensure-go.sh" cd "${REPO_ROOT}" +go mod tidy -v +go mod vendor -v go mod verify diff --git a/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh b/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh index ed69ebbf8f..a9857764b6 100644 --- a/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh +++ b/vendor/sigs.k8s.io/cluster-api/scripts/ci-test.sh @@ -25,4 +25,4 @@ cd "${REPO_ROOT}" && \ source ./scripts/fetch_ext_bins.sh && \ fetch_tools && \ setup_envs && \ - make test + make test-go diff --git a/vendor/sigs.k8s.io/cluster-api/scripts/fetch_ext_bins.sh b/vendor/sigs.k8s.io/cluster-api/scripts/fetch_ext_bins.sh index 9d2646b264..16f534a13f 100644 --- a/vendor/sigs.k8s.io/cluster-api/scripts/fetch_ext_bins.sh +++ b/vendor/sigs.k8s.io/cluster-api/scripts/fetch_ext_bins.sh @@ -26,8 +26,7 @@ if [[ -n "${TRACE}" ]]; then set -x fi -# k8s_version=1.10.1 -k8s_version=1.11.0 +k8s_version=1.14.1 goarch=amd64 goos="unknown" diff --git a/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/BUILD.bazel index 8cc7fbe943..69972c407c 100644 --- a/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/BUILD.bazel +++ b/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/BUILD.bazel @@ -4,16 +4,14 @@ go_test( name = "go_default_test", srcs = ["cluster_test.go"], deps = [ - "//pkg/apis/cluster/v1alpha1:go_default_library", - "//pkg/client/clientset_generated/clientset:go_default_library", - "//pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", + "//api/v1alpha2:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library", ], ) diff --git a/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/cluster_test.go b/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/cluster_test.go index b27a19ea87..47adc9b222 100644 --- a/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/cluster_test.go +++ b/vendor/sigs.k8s.io/cluster-api/test/integration/cluster/cluster_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "os" "testing" @@ -24,22 +25,24 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - informers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/clientcmd" - clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - clientset "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" - client "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2" + "sigs.k8s.io/controller-runtime/pkg/client" ) -var clusterSpec = &clusterv1alpha1.ClusterSpec{ - ClusterNetwork: clusterv1alpha1.ClusterNetworkingConfig{ +func init() { + clusterv1.AddToScheme(scheme.Scheme) +} + +var clusterSpec = &clusterv1.ClusterSpec{ + ClusterNetwork: &clusterv1.ClusterNetworkingConfig{ ServiceDomain: "mydomain.com", - Services: clusterv1alpha1.NetworkRanges{ + Services: clusterv1.NetworkRanges{ CIDRBlocks: []string{"10.96.0.0/12"}, }, - Pods: clusterv1alpha1.NetworkRanges{ + Pods: clusterv1.NetworkRanges{ CIDRBlocks: []string{"192.168.0.0/16"}, }, }, @@ -54,10 +57,8 @@ func TestCluster(t *testing.T) { } var _ = Describe("Cluster-Controller", func() { - var clusterapi client.ClusterInterface - var client *kubernetes.Clientset - var stopper chan struct{} - var informer cache.SharedIndexInformer + var k8sClient *kubernetes.Clientset + var apiclient client.Client var testNamespace string BeforeEach(func() { @@ -67,61 +68,43 @@ var _ = Describe("Cluster-Controller", func() { Expect(err).ShouldNot(HaveOccurred()) // Create kubernetes client - client, err = kubernetes.NewForConfig(config) + k8sClient, err = kubernetes.NewForConfig(config) Expect(err).ShouldNot(HaveOccurred()) // Create namespace for test ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "clusterapi-test-"}} - ns, err = client.Core().Namespaces().Create(ns) + ns, err = k8sClient.CoreV1().Namespaces().Create(ns) Expect(err).ShouldNot(HaveOccurred()) testNamespace = ns.ObjectMeta.Name - // Create informer for events in the namespace - factory := informers.NewSharedInformerFactoryWithOptions(client, 0, informers.WithNamespace(testNamespace)) - informer = factory.Core().V1().Events().Informer() - stopper = make(chan struct{}) - - // Create clusterapi client - cs, err := clientset.NewForConfig(config) + // Create a new client + apiclient, err = client.New(config, client.Options{Scheme: scheme.Scheme}) Expect(err).ShouldNot(HaveOccurred()) - clusterapi = cs.ClusterV1alpha1().Clusters(testNamespace) }) AfterEach(func() { - close(stopper) - client.Core().Namespaces().Delete(testNamespace, &metav1.DeleteOptions{}) + k8sClient.CoreV1().Namespaces().Delete(testNamespace, &metav1.DeleteOptions{}) }) Describe("Create Cluster", func() { - It("Should trigger an event", func(done Done) { - // Register handler for cluster events - events := make(chan *corev1.Event, 0) - informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - // Guard against miscofigured informer not listening to Events - Expect(obj).Should(BeAssignableToTypeOf(&corev1.Event{})) - - e := obj.(*corev1.Event) - if e.InvolvedObject.Kind == "Cluster" { - events <- e - } - }, - }) - go informer.Run(stopper) - Expect(cache.WaitForCacheSync(stopper, informer.HasSynced)).To(BeTrue()) - + It("Should reach to pending phase after creation", func(done Done) { + ctx := context.Background() // Create Cluster - cluster := &clusterv1alpha1.Cluster{ + 
cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "cluster-", Namespace: testNamespace, }, Spec: *clusterSpec.DeepCopy(), } - _, err := clusterapi.Create(cluster) - Expect(err).ShouldNot(HaveOccurred()) - Expect(<-events).ShouldNot(BeNil()) + Expect(apiclient.Create(ctx, cluster)).ShouldNot(HaveOccurred()) + Eventually(func() bool { + if err := apiclient.Get(ctx, client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}, cluster); err != nil { + return false + } + return cluster.Status.Phase == "pending" + }).Should(BeTrue()) close(done) }, TIMEOUT) diff --git a/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt b/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt index 5e228ec183..4ed10c5870 100644 --- a/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt +++ b/vendor/sigs.k8s.io/cluster-api/vendor/modules.txt @@ -1,60 +1,66 @@ -# cloud.google.com/go v0.35.1 +# cloud.google.com/go v0.34.0 cloud.google.com/go/compute/metadata -# contrib.go.opencensus.io/exporter/ocagent v0.2.0 +# contrib.go.opencensus.io/exporter/ocagent v0.4.12 contrib.go.opencensus.io/exporter/ocagent -# github.com/Azure/go-autorest v11.5.0+incompatible +# github.com/Azure/go-autorest/autorest v0.2.0 github.com/Azure/go-autorest/autorest -github.com/Azure/go-autorest/autorest/adal github.com/Azure/go-autorest/autorest/azure +# github.com/Azure/go-autorest/autorest/adal v0.1.0 +github.com/Azure/go-autorest/autorest/adal +# github.com/Azure/go-autorest/autorest/date v0.1.0 +github.com/Azure/go-autorest/autorest/date +# github.com/Azure/go-autorest/logger v0.1.0 github.com/Azure/go-autorest/logger +# github.com/Azure/go-autorest/tracing v0.1.0 github.com/Azure/go-autorest/tracing -github.com/Azure/go-autorest/autorest/date -# github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 -github.com/appscode/jsonpatch # github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/beorn7/perks/quantile -# github.com/census-instrumentation/opencensus-proto v0.1.0 +# github.com/census-instrumentation/opencensus-proto v0.2.0 github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 -github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 +github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go -# github.com/evanphx/json-patch v4.2.0+incompatible +# github.com/evanphx/json-patch v4.5.0+incompatible github.com/evanphx/json-patch -# github.com/ghodss/yaml v1.0.0 -github.com/ghodss/yaml +# github.com/fatih/color v1.7.0 +github.com/fatih/color # github.com/go-logr/logr v0.1.0 github.com/go-logr/logr # github.com/go-logr/zapr v0.1.0 github.com/go-logr/zapr -# github.com/gobuffalo/envy v1.6.12 -github.com/gobuffalo/envy -# github.com/gogo/protobuf v1.2.0 +# github.com/gobuffalo/flect v0.1.5 +github.com/gobuffalo/flect +# github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff +# github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 github.com/golang/groupcache/lru -# github.com/golang/protobuf v1.2.0 +# 
github.com/golang/protobuf v1.3.1 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/wrappers -# github.com/google/btree v1.0.0 -github.com/google/btree -# github.com/google/gofuzz v1.0.0 +github.com/golang/protobuf/jsonpb +github.com/golang/protobuf/protoc-gen-go/generator +github.com/golang/protobuf/ptypes/struct +github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/protoc-gen-go/generator/internal/remap +github.com/golang/protobuf/protoc-gen-go/plugin +# github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf github.com/google/gofuzz -# github.com/google/uuid v1.1.1 -github.com/google/uuid # github.com/googleapis/gnostic v0.2.0 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions -# github.com/gophercloud/gophercloud v0.0.0-20190221164956-3f3cc5a566b2 +# github.com/gophercloud/gophercloud v0.2.0 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/identity/v2/tokens @@ -62,9 +68,10 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/tokens github.com/gophercloud/gophercloud/openstack/utils github.com/gophercloud/gophercloud/openstack/identity/v2/tenants github.com/gophercloud/gophercloud/pagination -# github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f -github.com/gregjones/httpcache -github.com/gregjones/httpcache/diskcache +# github.com/grpc-ecosystem/grpc-gateway v1.8.5 +github.com/grpc-ecosystem/grpc-gateway/runtime +github.com/grpc-ecosystem/grpc-gateway/utilities +github.com/grpc-ecosystem/grpc-gateway/internal # github.com/hashicorp/golang-lru v0.5.0 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -74,16 +81,16 @@ github.com/hpcloud/tail/ratelimiter github.com/hpcloud/tail/util github.com/hpcloud/tail/watch github.com/hpcloud/tail/winfile -# github.com/imdario/mergo v0.3.7 +# github.com/imdario/mergo v0.3.6 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 github.com/inconshreveable/mousetrap -# github.com/joho/godotenv v1.3.0 -github.com/joho/godotenv # github.com/json-iterator/go v1.1.6 github.com/json-iterator/go -# github.com/markbates/inflect v1.0.4 -github.com/markbates/inflect +# github.com/mattn/go-colorable v0.1.2 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.8 +github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd @@ -111,32 +118,30 @@ github.com/onsi/ginkgo/internal/specrunner github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty # github.com/onsi/gomega v1.5.0 github.com/onsi/gomega -github.com/onsi/gomega/gbytes -github.com/onsi/gomega/gexec github.com/onsi/gomega/internal/assertion github.com/onsi/gomega/internal/asyncassertion github.com/onsi/gomega/internal/testingtsupport github.com/onsi/gomega/matchers github.com/onsi/gomega/types -github.com/onsi/gomega/format +github.com/onsi/gomega/gbytes +github.com/onsi/gomega/gexec github.com/onsi/gomega/internal/oraclematcher +github.com/onsi/gomega/format github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node 
github.com/onsi/gomega/matchers/support/goraph/util -# github.com/pborman/uuid v1.2.0 +# github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c github.com/pborman/uuid -# github.com/peterbourgon/diskv v2.0.1+incompatible -github.com/peterbourgon/diskv # github.com/pkg/errors v0.8.1 github.com/pkg/errors -# github.com/prometheus/client_golang v0.9.2 +# github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal # github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f github.com/prometheus/client_model/go -# github.com/prometheus/common v0.1.0 +# github.com/prometheus/common v0.2.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -145,32 +150,28 @@ github.com/prometheus/procfs github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs github.com/prometheus/procfs/internal/util -# github.com/rogpeppe/go-internal v1.1.0 -github.com/rogpeppe/go-internal/modfile -github.com/rogpeppe/go-internal/module -github.com/rogpeppe/go-internal/semver # github.com/sergi/go-diff v1.0.0 github.com/sergi/go-diff/diffmatchpatch -# github.com/spf13/afero v1.2.2 -github.com/spf13/afero -github.com/spf13/afero/mem # github.com/spf13/cobra v0.0.3 github.com/spf13/cobra # github.com/spf13/pflag v1.0.3 github.com/spf13/pflag -# go.opencensus.io v0.18.0 +# go.opencensus.io v0.20.2 go.opencensus.io/plugin/ochttp go.opencensus.io/plugin/ochttp/propagation/tracecontext go.opencensus.io/stats/view go.opencensus.io/trace go.opencensus.io -go.opencensus.io/trace/tracestate -go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/plugin/ocgrpc +go.opencensus.io/resource go.opencensus.io/stats go.opencensus.io/tag +go.opencensus.io/trace/tracestate +go.opencensus.io/plugin/ochttp/propagation/b3 go.opencensus.io/trace/propagation -go.opencensus.io/exemplar go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata +go.opencensus.io/metric/metricproducer go.opencensus.io/stats/internal go.opencensus.io/internal go.opencensus.io/trace/internal @@ -199,15 +200,15 @@ golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/trace golang.org/x/net/internal/timeseries -# golang.org/x/oauth2 v0.0.0-20190115181402-5dab4167f31c +# golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 +# golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f +# golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2 @@ -231,21 +232,19 @@ golang.org/x/text/runes golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag -# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c +# golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20190124215303-cc6a436ffe6b -golang.org/x/tools/imports -golang.org/x/tools/go/ast/astutil +# golang.org/x/tools v0.0.0-20190501045030-23463209683d golang.org/x/tools/go/packages -golang.org/x/tools/internal/gopathwalk -golang.org/x/tools/internal/module 
golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/cgo golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/semver -golang.org/x/tools/internal/fastwalk golang.org/x/tools/go/internal/gcimporter -# google.golang.org/api v0.1.0 +golang.org/x/tools/internal/fastwalk +# gomodules.xyz/jsonpatch/v2 v2.0.0 +gomodules.xyz/jsonpatch/v2 +# google.golang.org/api v0.3.1 google.golang.org/api/support/bundler # google.golang.org/appengine v1.4.0 google.golang.org/appengine @@ -258,18 +257,23 @@ google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto v0.0.0-20190219182410-082222b4a5c5 +# google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 +google.golang.org/genproto/googleapis/api/httpbody +google.golang.org/genproto/protobuf/field_mask google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.18.0 +# google.golang.org/grpc v1.19.1 google.golang.org/grpc +google.golang.org/grpc/credentials +google.golang.org/grpc/metadata +google.golang.org/grpc/codes +google.golang.org/grpc/grpclog +google.golang.org/grpc/status +google.golang.org/grpc/stats google.golang.org/grpc/balancer google.golang.org/grpc/balancer/roundrobin -google.golang.org/grpc/codes google.golang.org/grpc/connectivity -google.golang.org/grpc/credentials google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto -google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/binarylog @@ -279,17 +283,14 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive -google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough -google.golang.org/grpc/stats -google.golang.org/grpc/status google.golang.org/grpc/tap -google.golang.org/grpc/balancer/base google.golang.org/grpc/credentials/internal +google.golang.org/grpc/balancer/base google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/internal/syscall # gopkg.in/fsnotify.v1 v1.4.7 @@ -300,17 +301,15 @@ gopkg.in/inf.v0 gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0-20190222213804-5cb15d344471 -k8s.io/api/autoscaling/v1 +# k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b k8s.io/api/core/v1 +k8s.io/api/autoscaling/v1 k8s.io/api/apps/v1 -k8s.io/api/authentication/v1 -k8s.io/api/policy/v1beta1 -k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 k8s.io/api/auditregistration/v1alpha1 +k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 k8s.io/api/authorization/v1beta1 @@ -320,13 +319,19 @@ k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 +k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 +k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1alpha1 +k8s.io/api/node/v1beta1 +k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 +k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 
k8s.io/api/scheduling/v1beta1 k8s.io/api/settings/v1alpha1 @@ -334,62 +339,63 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/admission/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 +# k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 -k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/apis/apiextensions +k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme -# k8s.io/apimachinery v0.0.0-20190703205208-4cfb76a8bf76 => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 -k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/apis/meta/v1 -k8s.io/apimachinery/pkg/util/wait -k8s.io/apimachinery/pkg/util/yaml -k8s.io/apimachinery/pkg/types +# k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/validation/field -k8s.io/apimachinery/pkg/runtime/serializer -k8s.io/apimachinery/pkg/util/runtime -k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/pkg/api/errors +k8s.io/apimachinery/pkg/util/wait +k8s.io/apimachinery/pkg/util/yaml +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/sets -k8s.io/apimachinery/pkg/api/meta -k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/runtime/serializer/streaming -k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/util/errors -k8s.io/apimachinery/pkg/util/validation -k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/conversion -k8s.io/apimachinery/pkg/fields -k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/conversion/queryparams +k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/naming -k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/fields +k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/util/clock +k8s.io/apimachinery/pkg/util/strategicpatch +k8s.io/apimachinery/third_party/forked/golang/reflect k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer k8s.io/apimachinery/pkg/runtime/serializer/versioning -k8s.io/apimachinery/pkg/util/strategicpatch -k8s.io/apimachinery/pkg/util/cache -k8s.io/apimachinery/pkg/util/diff -k8s.io/apimachinery/pkg/apis/meta/v1beta1 -k8s.io/apimachinery/third_party/forked/golang/reflect +k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/util/uuid -k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/third_party/forked/golang/json +k8s.io/apimachinery/pkg/util/framer 
+k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/apis/meta/internalversion -# k8s.io/apiserver v0.0.0-20190122042701-b6aa1175dafa +# k8s.io/apiserver v0.0.0-20190409021813-1ec86e4da56c k8s.io/apiserver/pkg/storage/names -# k8s.io/client-go v10.0.0+incompatible +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible +k8s.io/client-go/kubernetes/scheme k8s.io/client-go/kubernetes k8s.io/client-go/rest k8s.io/client-go/tools/clientcmd @@ -397,16 +403,10 @@ k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/plugin/pkg/client/auth k8s.io/client-go/kubernetes/typed/core/v1 k8s.io/client-go/plugin/pkg/client/auth/gcp -k8s.io/client-go/discovery -k8s.io/client-go/util/flowcontrol -k8s.io/client-go/discovery/fake -k8s.io/client-go/testing -k8s.io/client-go/tools/cache -k8s.io/client-go/tools/leaderelection/resourcelock k8s.io/client-go/tools/record -k8s.io/client-go/util/integer +k8s.io/client-go/tools/leaderelection/resourcelock k8s.io/client-go/util/retry -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 +k8s.io/client-go/discovery k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 @@ -423,20 +423,26 @@ k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/batch/v2alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 +k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1 +k8s.io/client-go/kubernetes/typed/networking/v1beta1 +k8s.io/client-go/kubernetes/typed/node/v1alpha1 +k8s.io/client-go/kubernetes/typed/node/v1beta1 k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/settings/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/util/flowcontrol k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest/watch @@ -446,27 +452,26 @@ k8s.io/client-go/util/cert k8s.io/client-go/tools/auth k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/util/homedir +k8s.io/client-go/dynamic k8s.io/client-go/plugin/pkg/client/auth/azure k8s.io/client-go/plugin/pkg/client/auth/oidc k8s.io/client-go/plugin/pkg/client/auth/openstack -k8s.io/client-go/dynamic -k8s.io/client-go/kubernetes/scheme k8s.io/client-go/tools/leaderelection k8s.io/client-go/tools/reference k8s.io/client-go/util/jsonpath -k8s.io/client-go/tools/pager -k8s.io/client-go/util/buffer +k8s.io/client-go/tools/record/util k8s.io/client-go/util/workqueue k8s.io/client-go/kubernetes/fake -k8s.io/client-go/informers k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 k8s.io/client-go/util/connrotation +k8s.io/client-go/util/keyutil k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/restmapper 
+k8s.io/client-go/tools/cache k8s.io/client-go/third_party/forked/golang/template -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake +k8s.io/client-go/discovery/fake k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake k8s.io/client-go/kubernetes/typed/apps/v1/fake k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake @@ -483,182 +488,94 @@ k8s.io/client-go/kubernetes/typed/batch/v1/fake k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake +k8s.io/client-go/kubernetes/typed/coordination/v1/fake k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake k8s.io/client-go/kubernetes/typed/core/v1/fake k8s.io/client-go/kubernetes/typed/events/v1beta1/fake k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake k8s.io/client-go/kubernetes/typed/networking/v1/fake +k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake +k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake +k8s.io/client-go/kubernetes/typed/node/v1beta1/fake k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake k8s.io/client-go/kubernetes/typed/rbac/v1/fake k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake +k8s.io/client-go/kubernetes/typed/scheduling/v1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1/fake k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake -k8s.io/client-go/informers/admissionregistration -k8s.io/client-go/informers/apps -k8s.io/client-go/informers/auditregistration -k8s.io/client-go/informers/autoscaling -k8s.io/client-go/informers/batch -k8s.io/client-go/informers/certificates -k8s.io/client-go/informers/coordination -k8s.io/client-go/informers/core -k8s.io/client-go/informers/events -k8s.io/client-go/informers/extensions -k8s.io/client-go/informers/internalinterfaces -k8s.io/client-go/informers/networking -k8s.io/client-go/informers/policy -k8s.io/client-go/informers/rbac -k8s.io/client-go/informers/scheduling -k8s.io/client-go/informers/settings -k8s.io/client-go/informers/storage -k8s.io/client-go/informers/admissionregistration/v1alpha1 -k8s.io/client-go/informers/admissionregistration/v1beta1 -k8s.io/client-go/informers/apps/v1 -k8s.io/client-go/informers/apps/v1beta1 -k8s.io/client-go/informers/apps/v1beta2 -k8s.io/client-go/informers/auditregistration/v1alpha1 -k8s.io/client-go/informers/autoscaling/v1 -k8s.io/client-go/informers/autoscaling/v2beta1 -k8s.io/client-go/informers/autoscaling/v2beta2 -k8s.io/client-go/informers/batch/v1 -k8s.io/client-go/informers/batch/v1beta1 -k8s.io/client-go/informers/batch/v2alpha1 -k8s.io/client-go/informers/certificates/v1beta1 -k8s.io/client-go/informers/coordination/v1beta1 -k8s.io/client-go/informers/core/v1 -k8s.io/client-go/informers/events/v1beta1 -k8s.io/client-go/informers/extensions/v1beta1 -k8s.io/client-go/informers/networking/v1 -k8s.io/client-go/informers/policy/v1beta1 -k8s.io/client-go/informers/rbac/v1 -k8s.io/client-go/informers/rbac/v1alpha1 -k8s.io/client-go/informers/rbac/v1beta1 -k8s.io/client-go/informers/scheduling/v1alpha1 -k8s.io/client-go/informers/scheduling/v1beta1 -k8s.io/client-go/informers/settings/v1alpha1 -k8s.io/client-go/informers/storage/v1 -k8s.io/client-go/informers/storage/v1alpha1 
-k8s.io/client-go/informers/storage/v1beta1 -k8s.io/client-go/listers/admissionregistration/v1alpha1 -k8s.io/client-go/listers/admissionregistration/v1beta1 -k8s.io/client-go/listers/apps/v1 -k8s.io/client-go/listers/apps/v1beta1 -k8s.io/client-go/listers/apps/v1beta2 -k8s.io/client-go/listers/auditregistration/v1alpha1 -k8s.io/client-go/listers/autoscaling/v1 -k8s.io/client-go/listers/autoscaling/v2beta1 -k8s.io/client-go/listers/autoscaling/v2beta2 -k8s.io/client-go/listers/batch/v1 -k8s.io/client-go/listers/batch/v1beta1 -k8s.io/client-go/listers/batch/v2alpha1 -k8s.io/client-go/listers/certificates/v1beta1 -k8s.io/client-go/listers/coordination/v1beta1 -k8s.io/client-go/listers/core/v1 -k8s.io/client-go/listers/events/v1beta1 -k8s.io/client-go/listers/extensions/v1beta1 -k8s.io/client-go/listers/networking/v1 -k8s.io/client-go/listers/policy/v1beta1 -k8s.io/client-go/listers/rbac/v1 -k8s.io/client-go/listers/rbac/v1alpha1 -k8s.io/client-go/listers/rbac/v1beta1 -k8s.io/client-go/listers/scheduling/v1alpha1 -k8s.io/client-go/listers/scheduling/v1beta1 -k8s.io/client-go/listers/settings/v1alpha1 -k8s.io/client-go/listers/storage/v1 -k8s.io/client-go/listers/storage/v1alpha1 -k8s.io/client-go/listers/storage/v1beta1 -# k8s.io/code-generator v0.0.0-20181117043124-c2090bec4d9b -k8s.io/code-generator/cmd/client-gen -k8s.io/code-generator/cmd/conversion-gen -k8s.io/code-generator/cmd/deepcopy-gen -k8s.io/code-generator/cmd/defaulter-gen -k8s.io/code-generator/cmd/informer-gen -k8s.io/code-generator/cmd/lister-gen -k8s.io/code-generator/cmd/register-gen -k8s.io/code-generator/cmd/set-gen -k8s.io/code-generator/cmd/client-gen/args -k8s.io/code-generator/cmd/client-gen/generators -k8s.io/code-generator/pkg/util -k8s.io/code-generator/cmd/conversion-gen/args -k8s.io/code-generator/cmd/conversion-gen/generators -k8s.io/code-generator/cmd/deepcopy-gen/args -k8s.io/code-generator/cmd/defaulter-gen/args -k8s.io/code-generator/cmd/informer-gen/args -k8s.io/code-generator/cmd/informer-gen/generators -k8s.io/code-generator/cmd/lister-gen/args -k8s.io/code-generator/cmd/lister-gen/generators -k8s.io/code-generator/cmd/register-gen/args -k8s.io/code-generator/cmd/register-gen/generators -k8s.io/code-generator/cmd/client-gen/types -k8s.io/code-generator/cmd/client-gen/generators/fake -k8s.io/code-generator/cmd/client-gen/generators/scheme -k8s.io/code-generator/cmd/client-gen/generators/util -k8s.io/code-generator/cmd/client-gen/path -# k8s.io/component-base v0.0.0-20190703210340-65d72cfeb85d +k8s.io/client-go/testing +k8s.io/client-go/tools/pager +# k8s.io/component-base v0.0.0-20190409021516-bd2732e5c3f7 k8s.io/component-base/cli/flag -# k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af -k8s.io/gengo/args -k8s.io/gengo/examples/deepcopy-gen/generators -k8s.io/gengo/examples/defaulter-gen/generators -k8s.io/gengo/examples/set-gen/generators -k8s.io/gengo/generator -k8s.io/gengo/namer -k8s.io/gengo/types -k8s.io/gengo/parser -k8s.io/gengo/examples/set-gen/sets -# k8s.io/klog v0.3.2 -k8s.io/klog +# k8s.io/klog v0.3.1 k8s.io/klog/klogr -# k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 +k8s.io/klog +# k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c k8s.io/kube-openapi/pkg/util/proto -# sigs.k8s.io/controller-runtime v0.1.12 +# k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 +k8s.io/utils/pointer +k8s.io/utils/integer +k8s.io/utils/buffer +k8s.io/utils/trace +# sigs.k8s.io/controller-runtime v0.2.0-beta.5 +sigs.k8s.io/controller-runtime +sigs.k8s.io/controller-runtime/pkg/scheme 
sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/manager sigs.k8s.io/controller-runtime/pkg/runtime/signals sigs.k8s.io/controller-runtime/pkg/runtime/log -sigs.k8s.io/controller-runtime/pkg/runtime/scheme sigs.k8s.io/controller-runtime/pkg/controller sigs.k8s.io/controller-runtime/pkg/handler sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/source +sigs.k8s.io/controller-runtime/pkg/builder +sigs.k8s.io/controller-runtime/pkg/controller/controllerutil +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/manager/signals sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/internal/log sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/internal/recorder sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/metrics sigs.k8s.io/controller-runtime/pkg/recorder sigs.k8s.io/controller-runtime/pkg/runtime/inject -sigs.k8s.io/controller-runtime/pkg/webhook/admission -sigs.k8s.io/controller-runtime/pkg/webhook/admission/types +sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/envtest +sigs.k8s.io/controller-runtime/pkg/log/zap sigs.k8s.io/controller-runtime/pkg/internal/controller sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/event sigs.k8s.io/controller-runtime/pkg/source/internal sigs.k8s.io/controller-runtime/pkg/client/fake +sigs.k8s.io/controller-runtime/pkg/webhook/admission +sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/cache/internal -sigs.k8s.io/controller-runtime/pkg/patch +sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -sigs.k8s.io/controller-runtime/pkg/webhook/types sigs.k8s.io/controller-runtime/pkg/envtest/printer sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/objectutil -# sigs.k8s.io/controller-tools v0.1.11 +sigs.k8s.io/controller-runtime/pkg/conversion +# sigs.k8s.io/controller-tools v0.2.0-beta.5 sigs.k8s.io/controller-tools/cmd/controller-gen -sigs.k8s.io/controller-tools/pkg/crd/generator +sigs.k8s.io/controller-tools/pkg/crd +sigs.k8s.io/controller-tools/pkg/deepcopy +sigs.k8s.io/controller-tools/pkg/genall +sigs.k8s.io/controller-tools/pkg/genall/help +sigs.k8s.io/controller-tools/pkg/genall/help/pretty +sigs.k8s.io/controller-tools/pkg/markers sigs.k8s.io/controller-tools/pkg/rbac sigs.k8s.io/controller-tools/pkg/webhook -sigs.k8s.io/controller-tools/pkg/crd/util -sigs.k8s.io/controller-tools/pkg/internal/codegen -sigs.k8s.io/controller-tools/pkg/internal/codegen/parse -sigs.k8s.io/controller-tools/pkg/util -sigs.k8s.io/controller-tools/pkg/internal/general -# sigs.k8s.io/testing_frameworks v0.1.1 +sigs.k8s.io/controller-tools/pkg/crd/markers +sigs.k8s.io/controller-tools/pkg/loader +# sigs.k8s.io/testing_frameworks v0.1.2-0.20190130140139-57f07443c2d4 sigs.k8s.io/testing_frameworks/integration sigs.k8s.io/testing_frameworks/integration/addr sigs.k8s.io/testing_frameworks/integration/internal diff --git a/vendor/sigs.k8s.io/controller-runtime/.gitignore b/vendor/sigs.k8s.io/controller-runtime/.gitignore new file mode 100644 index 0000000000..f749874ce0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test 
binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ + +# Vscode files +.vscode diff --git a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml new file mode 100644 index 0000000000..a418256a4a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -0,0 +1,5 @@ +linters-settings: + lll: + line-length: 170 + dupl: + threshold: 400 diff --git a/vendor/sigs.k8s.io/controller-runtime/.travis.yml b/vendor/sigs.k8s.io/controller-runtime/.travis.yml new file mode 100644 index 0000000000..1562e222fa --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/.travis.yml @@ -0,0 +1,31 @@ +language: go + +os: + - linux + - osx + +cache: + directories: + - $HOME/.cache/go-build + - $GOPATH/pkg/mod + +go: +- "1.12" + +git: + depth: 3 + +go_import_path: sigs.k8s.io/controller-runtime + +install: +- go get -u github.com/golang/dep/cmd/dep +#- go get -u golang.org/x/lint/golint +- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 + +script: +- GO111MODULE=on TRACE=1 ./hack/check-everything.sh + + +# TBD. Suppressing for now. +notifications: + email: false diff --git a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md new file mode 100644 index 0000000000..063016c2b8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# Contributing guidelines + +## Sign the CLA + +Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests. + +Please see https://git.k8s.io/community/CLA.md for more info + +## Contributing steps + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +## Test locally + +1. Setup tools + ```bash + $ go get -u github.com/golang/dep/cmd/dep + $ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 + ``` +1. Test + ```bash + GO111MODULE=on TRACE=1 ./hack/check-everything.sh + ``` + diff --git a/vendor/sigs.k8s.io/controller-runtime/FAQ.md b/vendor/sigs.k8s.io/controller-runtime/FAQ.md new file mode 100644 index 0000000000..cfc2997924 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/FAQ.md @@ -0,0 +1,81 @@ +# FAQ + +### Q: How do I know which type of object a controller references? + +**A**: Each controller should only reconcile one object type. Other +affected objects should be mapped to a single type of root object, using +the `EnqueueRequestForOwner` or `EnqueueRequestsFromMapFunc` event +handlers, and potentially indices. Then, your Reconcile method should +attempt to reconcile *all* state for that given root objects. + +### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)? + +**A**: You should not. Reconcile functions should be idempotent, and +should always reconcile state by reading all the state it needs, then +writing updates. 
This allows your reconciler to correctly respond to +generic events, adjust to skipped or coalesced events, and easily deal +with application startup. The controller will enqueue reconcile requests +for both old and new objects if a mapping changes, but it's your +responsibility to make sure you have enough information to be able to clean +up state that's no longer referenced. + +### Q: My cache might be stale if I read from a cache! How should I deal with that? + +**A**: There are several different approaches that can be taken, depending +on your situation. + +- When you can, take advantage of optimistic locking: use deterministic + names for objects you create, so that the Kubernetes API server will + warn you if the object already exists. Many controllers in Kubernetes + take this approach: the StatefulSet controller appends a specific number + to each pod that it creates, while the Deployment controller hashes the + pod template spec and appends that. + +- In the few cases when you cannot take advantage of deterministic names + (e.g. when using generateName), it may be useful to track which + actions you took, and assume that they need to be repeated if they don't + occur after a given time (e.g. using a requeue result). This is what + the ReplicaSet controller does. + +In general, write your controller with the assumption that information +will eventually be correct, but may be slightly out of date. Make sure +that your reconcile function enforces the entire state of the world each +time it runs. If none of this works for you, you can always construct +a client that reads directly from the API server, but this is generally +considered to be a last resort, and the two approaches above should +generally cover most circumstances. + +### Q: Where's the fake client? How do I use it? + +**A**: The fake client +[exists](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client/fake), +but we generally recommend using +[envtest.Environment](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) +to test against a real API server. In our experience, tests using fake +clients gradually re-implement poorly-written impressions of a real API +server, which leads to hard-to-maintain, complex test code. + +### Q: How should I write tests? Any suggestions for getting started? + +- Use the aforementioned + [envtest.Environment](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) + to spin up a real API server instead of trying to mock one out. + +- Structure your tests to check that the state of the world is as you + expect it, *not* that a particular set of API calls were made, when + working with Kubernetes APIs. This will allow you to more easily + refactor and improve the internals of your controllers without changing + your tests. + +- Remember that any time you're interacting with the API server, changes + may have some delay between write time and reconcile time. + +### Q: What are these errors about no Kind being registered for a type? + +**A**: You're probably missing a fully-set-up Scheme. Schemes record the +mapping between Go types and group-version-kinds in Kubernetes. In +general, your application should have its own Scheme containing the types +from the API groups that it needs (be they Kubernetes types or your own). +See the [scheme builder +docs](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/scheme) for +more information.
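That last answer is easiest to see in code. Below is a minimal sketch, against the controller-runtime version vendored by this change, of how a scheme is typically assembled and handed to a manager. It assumes the module path `sigs.k8s.io/cluster-api-provider-openstack` and the generated `AddToScheme` helper from `api/v1alpha2/groupversion_info.go`; it is an illustrative sketch, not necessarily the exact wiring in this change's `main.go`.

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha2"
)

func main() {
	// Register both the built-in Kubernetes types and the provider's own API
	// group, so clients built from this scheme can map Go types to
	// GroupVersionKinds (avoiding the "no Kind registered" error above).
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = infrav1.AddToScheme(scheme)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// Controllers would be registered against mgr here, before starting it.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```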
diff --git a/vendor/sigs.k8s.io/cluster-api/Gopkg.lock b/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock similarity index 52% rename from vendor/sigs.k8s.io/cluster-api/Gopkg.lock rename to vendor/sigs.k8s.io/controller-runtime/Gopkg.lock index a5fda4985c..0996047409 100644 --- a/vendor/sigs.k8s.io/cluster-api/Gopkg.lock +++ b/vendor/sigs.k8s.io/controller-runtime/Gopkg.lock @@ -2,23 +2,15 @@ [[projects]] - digest = "1:4d6f036ea3fe636bcb2e89850bcdc62a771354e157cd51b8b22a2de8562bf663" + digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57" name = "cloud.google.com/go" packages = ["compute/metadata"] pruneopts = "UT" - revision = "28a4bc8c44b3acbcc482cff0cdf7de29a4688b61" - version = "v0.35.1" + revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" + version = "v0.26.0" [[projects]] - digest = "1:b92928b73320648b38c93cacb9082c0fe3f8ac3383ad9bd537eef62c380e0e7a" - name = "contrib.go.opencensus.io/exporter/ocagent" - packages = ["."] - pruneopts = "UT" - revision = "00af367e65149ff1f2f4b93bbfbb84fd9297170d" - version = "v0.2.0" - -[[projects]] - digest = "1:32e3c3a7444d3b98acd755e9ed768e28746b8ea9edb1fc3add44e6ab2a72e321" + digest = "1:327b9226c8ea5f1cd9952ba859bb7c335cab40fd8781c4a790ef259b0c5fbc40" name = "github.com/Azure/go-autorest" packages = [ "autorest", @@ -26,19 +18,27 @@ "autorest/azure", "autorest/date", "logger", - "tracing", + "version", ] pruneopts = "UT" - revision = "f4369d4c79bf40df5e86def1e0f54c7ef261a51c" - version = "v11.5.0" + revision = "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf" + version = "v10.15.2" [[projects]] - digest = "1:464aef731a5f82ded547c62e249a2e9ec59fbbc9ddab53cda7b9857852630a61" - name = "github.com/appscode/jsonpatch" + digest = "1:d1665c44bd5db19aaee18d1b6233c99b0b9a986e8bccb24ef54747547a48027f" + name = "github.com/PuerkitoBio/purell" + packages = ["."] + pruneopts = "UT" + revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b" + name = "github.com/PuerkitoBio/urlesc" packages = ["."] pruneopts = "UT" - revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2" - version = "1.0.0" + revision = "de5bf2ad457846296e2031421a34e2568e304e35" [[projects]] branch = "master" @@ -48,19 +48,6 @@ pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" -[[projects]] - digest = "1:65b0d980b428a6ad4425f2df4cd5410edd81f044cf527bd1c345368444649e58" - name = "github.com/census-instrumentation/opencensus-proto" - packages = [ - "gen-go/agent/common/v1", - "gen-go/agent/trace/v1", - "gen-go/resource/v1", - "gen-go/trace/v1", - ] - pruneopts = "UT" - revision = "7f2434bc10da710debe5c4315ed6d4df454b4024" - version = "v0.1.0" - [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" @@ -78,15 +65,15 @@ version = "v3.2.0" [[projects]] - digest = "1:9f6d41e901cde897bad4569877c96c9cacee67c9c6ede2794c515371c954cb6a" + digest = "1:899234af23e5793c34e06fd397f86ba33af5307b959b6a7afd19b63db065a9d7" name = "github.com/emicklei/go-restful" packages = [ ".", "log", ] pruneopts = "UT" - revision = "e37671aced663c8d3a395bd301857e26dcf4340c" - version = "v2.8.1" + revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" + version = "v2.8.0" [[projects]] digest = "1:f1f2bd73c025d24c3b93abf6364bccb802cf2fdedaa44360804c67800e8fab8d" @@ -96,19 +83,14 @@ revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" version = "v4.1.0" -[[projects]] - digest = 
"1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "UT" - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" - [[projects]] branch = "master" - digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" + digest = "1:99f994f4c2f71aeea1810d6061f3b52f3d2ee0084cfcb1ebbf40e00a0d0666f6" name = "github.com/go-logr/logr" - packages = ["."] + packages = [ + ".", + "testing", + ] pruneopts = "UT" revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" @@ -118,26 +100,49 @@ packages = ["."] pruneopts = "UT" revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" - version = "v0.1.0" [[projects]] - digest = "1:4868df29afe0468bd98f663aff34f486b2e92db41f25747d8e6855c9cd91808b" - name = "github.com/gobuffalo/envy" + digest = "1:2997679181d901ac8aaf4330d11138ecf3974c6d3334995ff36f20cbd597daf8" + name = "github.com/go-openapi/jsonpointer" packages = ["."] pruneopts = "UT" - revision = "f3b98d4da2fa434517f47f615e217fa72ff2c51e" - version = "v1.6.12" + revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" + version = "0.16.0" [[projects]] - digest = "1:b402bb9a24d108a9405a6f34675091b036c8b056aac843bf6ef2389a65c5cf48" + digest = "1:1ae3f233d75a731b164ca9feafd8ed646cbedf1784095876ed6988ce8aa88b1f" + name = "github.com/go-openapi/jsonreference" + packages = ["."] + pruneopts = "UT" + revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" + version = "0.16.0" + +[[projects]] + digest = "1:3e5bdbd2a071c72c778c28fd7b5dfde95cdfbcef412f364377e031877205e418" + name = "github.com/go-openapi/spec" + packages = ["."] + pruneopts = "UT" + revision = "384415f06ee238aae1df5caad877de6ceac3a5c4" + version = "0.16.0" + +[[projects]] + digest = "1:c80984d4a9bb79539743aff5af91b595d84f513700150b0ed73c1697d1200d54" + name = "github.com/go-openapi/swag" + packages = ["."] + pruneopts = "UT" + revision = "becd2f08beafcca035645a8a101e0e3e18140458" + version = "0.16.0" + +[[projects]] + digest = "1:34e709f36fd4f868fb00dbaf8a6cab4c1ae685832d392874ba9d7c5dec2429d1" name = "github.com/gogo/protobuf" packages = [ "proto", "sortkeys", ] pruneopts = "UT" - revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7" - version = "v1.2.0" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" [[projects]] branch = "master" @@ -145,10 +150,10 @@ name = "github.com/golang/groupcache" packages = ["lru"] pruneopts = "UT" - revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa" + revision = "24b0969c4cb722950103eed87108c8d291a8df00" [[projects]] - digest = "1:8f0705fa33e8957018611cc81c65cb373b626c092d39931bb86882489fc4c3f4" + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" name = "github.com/golang/protobuf" packages = [ "proto", @@ -156,20 +161,11 @@ "ptypes/any", "ptypes/duration", "ptypes/timestamp", - "ptypes/wrappers", ] pruneopts = "UT" revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" version = "v1.2.0" -[[projects]] - branch = "master" - digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df" - name = "github.com/google/btree" - packages = ["."] - pruneopts = "UT" - revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" - [[projects]] branch = "master" digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb" @@ -178,14 +174,6 @@ pruneopts = "UT" revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" -[[projects]] - digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b" - name = "github.com/google/uuid" - 
packages = ["."] - pruneopts = "UT" - revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" - version = "v1.1.1" - [[projects]] digest = "1:65c4414eeb350c47b8de71110150d0ea8a281835b1f386eacaa3ad7325929c21" name = "github.com/googleapis/gnostic" @@ -200,7 +188,7 @@ [[projects]] branch = "master" - digest = "1:73f45b0ab76211ff1301bbbce4955585cfe906b63cade635c3d11eb0c11be5f2" + digest = "1:349090dc3e864fe83388be8e8949df6e22607c477644abcc3b530d1b2c7bb714" name = "github.com/gophercloud/gophercloud" packages = [ ".", @@ -212,29 +200,18 @@ "pagination", ] pruneopts = "UT" - revision = "3f3cc5a566b2284815a4b6feb9e8c8f3d74aecdf" + revision = "199db9a3a2bfa55802ab074ab8fed67fce2a2791" [[projects]] branch = "master" - digest = "1:86c1210529e69d69860f2bb3ee9ccce0b595aa3f9165e7dd1388e5c612915888" - name = "github.com/gregjones/httpcache" - packages = [ - ".", - "diskcache", - ] - pruneopts = "UT" - revision = "c63ab54fda8f77302f8d414e19933f2b6026a089" - -[[projects]] - digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5" + digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db" name = "github.com/hashicorp/golang-lru" packages = [ ".", "simplelru", ] pruneopts = "UT" - revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" - version = "v0.5.0" + revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" [[projects]] digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813" @@ -251,28 +228,12 @@ version = "v1.0.0" [[projects]] - digest = "1:a0cefd27d12712af4b5018dc7046f245e1e3b5760e2e848c30b171b570708f9b" + digest = "1:8eb1de8112c9924d59bf1d3e5c26f5eaa2bfc2a5fcbb92dc1c2e4546d695f277" name = "github.com/imdario/mergo" packages = ["."] pruneopts = "UT" - revision = "7c29201646fa3de8506f701213473dd407f19646" - version = "v0.3.7" - -[[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - pruneopts = "UT" - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - digest = "1:ecd9aa82687cf31d1585d4ac61d0ba180e42e8a6182b85bd785fcca8dfeefc1b" - name = "github.com/joho/godotenv" - packages = ["."] - pruneopts = "UT" - revision = "23d116af351c84513e1946b527c88823e476be13" - version = "v1.3.0" + revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" + version = "v0.3.6" [[projects]] digest = "1:3e551bbb3a7c0ab2a2bf4660e7fcad16db089fdcfbb44b0199e62838038623ea" @@ -280,15 +241,19 @@ packages = ["."] pruneopts = "UT" revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "v1.1.5" + version = "1.1.5" [[projects]] - digest = "1:3804a3a02964db8e6db3e5e7960ac1c1a9b12835642dd4f4ac4e56c749ec73eb" - name = "github.com/markbates/inflect" - packages = ["."] + branch = "master" + digest = "1:84a5a2b67486d5d67060ac393aa255d05d24ed5ee41daecd5635ec22657b6492" + name = "github.com/mailru/easyjson" + packages = [ + "buffer", + "jlexer", + "jwriter", + ] pruneopts = "UT" - revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6" - version = "v1.0.4" + revision = "60711f1a8329503b04e1c88535f419d0bb440bff" [[projects]] digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" @@ -315,7 +280,7 @@ version = "1.0.1" [[projects]] - digest = "1:e9c3bb68a6c9470302b8046d4647e0612a2ea6037b9c6a47de60c0a90db504f8" + digest = "1:42e29deef12327a69123b9cb2cb45fee4af5c12c2a23c6e477338279a052703f" name = "github.com/onsi/ginkgo" packages = [ ".", @@ -338,11 +303,11 @@ "types", ] pruneopts = "UT" - revision = 
"eea6ad008b96acdaa524f5b409513bf062b500ad" - version = "v1.8.0" + revision = "3774a09d95489ccaa16032e0770d08ea77ba6184" + version = "v1.6.0" [[projects]] - digest = "1:029c6999f3efa5f1cd3f8b9e02a8bc4276cc439d2dc8dc73cd98e4efd5c76cba" + digest = "1:58418daa018f162c520512737c19ecd42582524b0f900f2eee7c212ce55cf0da" name = "github.com/onsi/gomega" packages = [ ".", @@ -361,43 +326,19 @@ "types", ] pruneopts = "UT" - revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2" - version = "v1.5.0" + revision = "b6ea1ea48f981d0f615a154a45eabb9dd466556d" + version = "v1.4.1" [[projects]] - digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" + digest = "1:361de06aa7ae272616cbe71c3994a654cc6316324e30998e650f7765b20c5b33" name = "github.com/pborman/uuid" packages = ["."] pruneopts = "UT" - revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" - version = "v1.2" - -[[projects]] - branch = "master" - digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" - name = "github.com/petar/GoLLRB" - packages = ["llrb"] - pruneopts = "UT" - revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" + revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" + version = "v1.1" [[projects]] - digest = "1:0e7775ebbcf00d8dd28ac663614af924411c868dca3d5aa762af0fae3808d852" - name = "github.com/peterbourgon/diskv" - packages = ["."] - pruneopts = "UT" - revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" - version = "v2.0.1" - -[[projects]] - digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" - version = "v0.8.1" - -[[projects]] - digest = "1:93a746f1060a8acbcf69344862b2ceced80f854170e1caae089b2834c5fbf7f4" + digest = "1:20a8a18488bdef471795af0413d9a3c9c35dc24ca93342329b884ba3c15cbaab" name = "github.com/prometheus/client_golang" packages = [ "prometheus", @@ -405,8 +346,8 @@ "prometheus/promhttp", ] pruneopts = "UT" - revision = "505eaef017263e299324067d40ca2c48f6a2cf50" - version = "v0.9.2" + revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4" + version = "v0.9.0" [[projects]] branch = "master" @@ -414,10 +355,11 @@ name = "github.com/prometheus/client_model" packages = ["go"] pruneopts = "UT" - revision = "56726106282f1985ea77d5305743db7231b0c0a8" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] - digest = "1:ce62b400185bf6b16ef6088011b719e449f5c15c4adb6821589679f752c2788e" + branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -425,12 +367,11 @@ "model", ] pruneopts = "UT" - revision = "2998b132700a7d019ff618c06a234b47c1f3f681" - version = "v0.1.0" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" - digest = "1:f532f2cdb9e9e4a8fad5a7e944482f4dd12650228e4a7a6c8492a0d069ce0690" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" name = "github.com/prometheus/procfs" packages = [ ".", @@ -439,78 +380,15 @@ "xfs", ] pruneopts = "UT" - revision = "bf6a532e95b1f7a62adf0ab5050a5bb2237ad2f4" + revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" [[projects]] - digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3" - name = "github.com/rogpeppe/go-internal" - packages = [ - "modfile", - "module", - "semver", - ] - pruneopts = "UT" - revision = "68d1cb014f030acd0f10ac553801e43c0e5da629" - version = "v1.1.0" - -[[projects]] 
- digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" - name = "github.com/sergi/go-diff" - packages = ["diffmatchpatch"] - pruneopts = "UT" - revision = "1744e2970ca51c86172c8190fadad617561ed6e7" - version = "v1.0.0" - -[[projects]] - digest = "1:d707dbc1330c0ed177d4642d6ae102d5e2c847ebd0eb84562d0dc4f024531cfc" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "UT" - revision = "a5d6946387efe7d64d09dcba68cdd523dc1273a3" - version = "v1.2.0" - -[[projects]] - digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" - name = "github.com/spf13/cobra" - packages = ["."] - pruneopts = "UT" - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" - -[[projects]] - digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9" name = "github.com/spf13/pflag" packages = ["."] pruneopts = "UT" - revision = "298182f68c66c05229eb03ac171abe6e309ee79a" - version = "v1.0.3" - -[[projects]] - digest = "1:2ae8314c44cd413cfdb5b1df082b350116dd8d2fff973e62c01b285b7affd89e" - name = "go.opencensus.io" - packages = [ - ".", - "exemplar", - "internal", - "internal/tagencoding", - "plugin/ochttp", - "plugin/ochttp/propagation/b3", - "plugin/ochttp/propagation/tracecontext", - "stats", - "stats/internal", - "stats/view", - "tag", - "trace", - "trace/internal", - "trace/propagation", - "trace/tracestate", - ] - pruneopts = "UT" - revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b" - version = "v0.18.0" + revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" + version = "v1.0.2" [[projects]] digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" @@ -545,15 +423,15 @@ [[projects]] branch = "master" - digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332" + digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" name = "golang.org/x/crypto" packages = ["ssh/terminal"] pruneopts = "UT" - revision = "057139ce5d2bdbe6fe73c53679e24e9cf007f637" + revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" [[projects]] branch = "master" - digest = "1:11c41693326331f3f42b28a524a9c7414ecd2f12f2d4d99a7e37e3254979a748" + digest = "1:ff58bc124488348ba2ca02f62de899d07ba3e7dce51da02557bfc9a8052608db" name = "golang.org/x/net" packages = [ "context", @@ -565,15 +443,13 @@ "http2", "http2/hpack", "idna", - "internal/timeseries", - "trace", ] pruneopts = "UT" - revision = "4b62a64f59f73840b9ab79204c94fee61cd1ba2c" + revision = "922f4815f713f213882e8ef45e0d315b164d705c" [[projects]] branch = "master" - digest = "1:511a6232760c10dcb1ebf1ab83ef0291e2baf801f203ca6314759c5458b73a6a" + digest = "1:f645667d687fc8bf228865a2c5455824ef05bad08841e673673ef2bb89ac5b90" name = "golang.org/x/oauth2" packages = [ ".", @@ -583,29 +459,21 @@ "jwt", ] pruneopts = "UT" - revision = "5dab4167f31cbd76b407f1486c86b40748bc5073" + revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" [[projects]] branch = "master" - digest = "1:75515eedc0dc2cb0b40372008b616fa2841d831c63eedd403285ff286c593295" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "37e7f081c4d4c64e13b10787722085407fe5d15f" - -[[projects]] - branch = "master" - digest = "1:bb644db32f5bc1e327a0f748ec871edc4dc46f19b4fbbc15bfd95b1460646537" + digest = "1:7710ea76bd73d3154e86eaf2db8b36a6dffbd6114e4e37b516f1d232c03ddd8d" name = "golang.org/x/sys" packages = [ "unix", "windows", ] pruneopts = "UT" - revision = 
"b90733256f2e882e81d52f9126de08df5615afd9" + revision = "11551d06cbcc94edc80a0facaccbda56473c19c1" [[projects]] - digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" + digest = "1:bb8277a2ca2bcad6ff7f413b939375924099be908cedd1314baa21ecd08df477" name = "golang.org/x/text" packages = [ "collate", @@ -634,6 +502,7 @@ "unicode/cldr", "unicode/norm", "unicode/rangetable", + "width", ] pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" @@ -641,43 +510,22 @@ [[projects]] branch = "master" - digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" name = "golang.org/x/time" packages = ["rate"] pruneopts = "UT" - revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" - -[[projects]] - branch = "master" - digest = "1:e97f89df92c40f9815adfea740acf7a7c685184295c470ec22322879d394a896" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "go/gcexportdata", - "go/internal/cgo", - "go/internal/gcimporter", - "go/internal/packagesdriver", - "go/packages", - "go/types/typeutil", - "imports", - "internal/fastwalk", - "internal/gopathwalk", - "internal/module", - "internal/semver", - ] - pruneopts = "UT" - revision = "cc6a436ffe6b424fffa1d91862c9b664d8407828" + revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" [[projects]] - digest = "1:5f003878aabe31d7f6b842d4de32b41c46c214bb629bb485387dbcce1edf5643" - name = "google.golang.org/api" - packages = ["support/bundler"] + digest = "1:7d4fd782f2a710d08834b2c01742b3bba7fb0248383f9cc6c3dc95b3025689d7" + name = "gomodules.xyz/jsonpatch" + packages = ["v2"] pruneopts = "UT" - revision = "19e022d8cf43ce81f046bae8cc18c5397cc7732f" - version = "v0.1.0" + revision = "e8422f09d27ee2c8cfb2c7f8089eb9eeb0764849" + version = "v2.0.0" [[projects]] - digest = "1:fa026a5c59bd2df343ec4a3538e6288dcf4e2ec5281d743ae82c120affe6926a" + digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e" name = "google.golang.org/appengine" packages = [ ".", @@ -692,56 +540,8 @@ "urlfetch", ] pruneopts = "UT" - revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" - version = "v1.4.0" - -[[projects]] - branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "082222b4a5c572e33e82ee9162d1352c7cf38682" - -[[projects]] - digest = "1:9ab5a33d8cb5c120602a34d2e985ce17956a4e8c2edce7e6961568f95e40c09a" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/binarylog", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/grpcsync", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "a02b0774206b209466313a0b525d2c738fe407eb" - version = "v1.18.0" + revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" + version = "v1.1.0" [[projects]] digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" @@ -769,19 +569,18 @@ revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" [[projects]] - digest = 
"1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" name = "gopkg.in/yaml.v2" packages = ["."] pruneopts = "UT" - revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" - version = "v2.2.2" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" [[projects]] - digest = "1:684a27f12acc943bee24373b4e8f4acde803990add2fc5dde602c27b58207d3b" + digest = "1:baf5a7da864b8f493ff372a9486c2d6a7fae8363699af2dc0ef6dba9a869928f" name = "k8s.io/api" packages = [ "admission/v1beta1", - "admissionregistration/v1alpha1", "admissionregistration/v1beta1", "apps/v1", "apps/v1beta1", @@ -798,15 +597,20 @@ "batch/v1beta1", "batch/v2alpha1", "certificates/v1beta1", + "coordination/v1", "coordination/v1beta1", "core/v1", "events/v1beta1", "extensions/v1beta1", "networking/v1", + "networking/v1beta1", + "node/v1alpha1", + "node/v1beta1", "policy/v1beta1", "rbac/v1", "rbac/v1alpha1", "rbac/v1beta1", + "scheduling/v1", "scheduling/v1alpha1", "scheduling/v1beta1", "settings/v1alpha1", @@ -815,11 +619,11 @@ "storage/v1beta1", ] pruneopts = "UT" - revision = "5cb15d34447165a97c76ed5a60e4e99c8a01ecfe" - version = "kubernetes-1.13.4" + revision = "6e4e0e4f393bf5e8bbff570acd13217aa5a770cd" + version = "kubernetes-1.14.1" [[projects]] - digest = "1:db0e48e58e92eddc9ae08e9efb3dfaa1d61e32de690fff0dae5bccf4b45ebcce" + digest = "1:65226935ef0a84716e327aa46c5a11cf2158ca6d0c79b379202d0b2c2be0e9bc" name = "k8s.io/apiextensions-apiserver" packages = [ "pkg/apis/apiextensions", @@ -829,21 +633,19 @@ "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", ] pruneopts = "UT" - revision = "d002e88f6236312f0289d9d1deab106751718ff0" - version = "kubernetes-1.13.4" + revision = "727a075fdec8319bf095330e344b3ccc668abc73" + version = "kubernetes-1.14.1" [[projects]] - digest = "1:ae4ffd63d09b73d49de165dbafa48079dd1ff7bcc9689954f4fe21860ae5a5b6" + digest = "1:2616411ee46c05b1488f127c171e9204695d15d1e654fe79ea2d204f96f28070" name = "k8s.io/apimachinery" packages = [ - "pkg/api/equality", "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1/validation", "pkg/apis/meta/v1beta1", "pkg/conversion", "pkg/conversion/queryparams", @@ -869,7 +671,6 @@ "pkg/util/mergepatch", "pkg/util/naming", "pkg/util/net", - "pkg/util/rand", "pkg/util/runtime", "pkg/util/sets", "pkg/util/strategicpatch", @@ -884,27 +685,17 @@ "third_party/forked/golang/reflect", ] pruneopts = "UT" - revision = "86fb29eff6288413d76bd8506874fddd9fccdff0" - version = "kubernetes-1.13.4" + revision = "6a84e37a896db9780c75367af8d2ed2bb944022e" + version = "kubernetes-1.14.1" [[projects]] - branch = "master" - digest = "1:f4399da66c191adee58287b4069dbd3f040c4b42c7050e426b9b906c263c65b6" - name = "k8s.io/apiserver" - packages = ["pkg/storage/names"] - pruneopts = "UT" - revision = "b6aa1175dafa586b8042c7bfdcd1585f9ecfaa08" - -[[projects]] - digest = "1:262c8555325bbe47c12ad8f29a8cacd208994cf7b69edc47dd41073854dccff5" + digest = "1:00f288473b016529887e152dad2465482cad64b233298d58f1526385505b27db" name = "k8s.io/client-go" packages = [ "discovery", - "discovery/fake", "dynamic", "informers", "informers/admissionregistration", - "informers/admissionregistration/v1alpha1", "informers/admissionregistration/v1beta1", "informers/apps", "informers/apps/v1", @@ -923,6 +714,7 @@ "informers/certificates", "informers/certificates/v1beta1", "informers/coordination", + 
"informers/coordination/v1", "informers/coordination/v1beta1", "informers/core", "informers/core/v1", @@ -933,6 +725,10 @@ "informers/internalinterfaces", "informers/networking", "informers/networking/v1", + "informers/networking/v1beta1", + "informers/node", + "informers/node/v1alpha1", + "informers/node/v1beta1", "informers/policy", "informers/policy/v1beta1", "informers/rbac", @@ -940,6 +736,7 @@ "informers/rbac/v1alpha1", "informers/rbac/v1beta1", "informers/scheduling", + "informers/scheduling/v1", "informers/scheduling/v1alpha1", "informers/scheduling/v1beta1", "informers/settings", @@ -949,73 +746,43 @@ "informers/storage/v1alpha1", "informers/storage/v1beta1", "kubernetes", - "kubernetes/fake", "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1alpha1", - "kubernetes/typed/admissionregistration/v1alpha1/fake", "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/admissionregistration/v1beta1/fake", "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1/fake", "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta1/fake", "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/apps/v1beta2/fake", "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/auditregistration/v1alpha1/fake", "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1/fake", "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authentication/v1beta1/fake", "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1/fake", "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/authorization/v1beta1/fake", "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v1/fake", "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta1/fake", "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/autoscaling/v2beta2/fake", "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1/fake", "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v1beta1/fake", "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/batch/v2alpha1/fake", "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/certificates/v1beta1/fake", + "kubernetes/typed/coordination/v1", "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/coordination/v1beta1/fake", "kubernetes/typed/core/v1", - "kubernetes/typed/core/v1/fake", "kubernetes/typed/events/v1beta1", - "kubernetes/typed/events/v1beta1/fake", "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/extensions/v1beta1/fake", "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1/fake", + "kubernetes/typed/networking/v1beta1", + "kubernetes/typed/node/v1alpha1", + "kubernetes/typed/node/v1beta1", "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/policy/v1beta1/fake", "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1/fake", "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1alpha1/fake", "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/rbac/v1beta1/fake", + "kubernetes/typed/scheduling/v1", "kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1alpha1/fake", "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/scheduling/v1beta1/fake", "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/settings/v1alpha1/fake", "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1/fake", "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1alpha1/fake", "kubernetes/typed/storage/v1beta1", - "kubernetes/typed/storage/v1beta1/fake", - 
"listers/admissionregistration/v1alpha1", "listers/admissionregistration/v1beta1", "listers/apps/v1", "listers/apps/v1beta1", @@ -1028,15 +795,20 @@ "listers/batch/v1beta1", "listers/batch/v2alpha1", "listers/certificates/v1beta1", + "listers/coordination/v1", "listers/coordination/v1beta1", "listers/core/v1", "listers/events/v1beta1", "listers/extensions/v1beta1", "listers/networking/v1", + "listers/networking/v1beta1", + "listers/node/v1alpha1", + "listers/node/v1beta1", "listers/policy/v1beta1", "listers/rbac/v1", "listers/rbac/v1alpha1", "listers/rbac/v1beta1", + "listers/scheduling/v1", "listers/scheduling/v1alpha1", "listers/scheduling/v1beta1", "listers/settings/v1alpha1", @@ -1069,160 +841,52 @@ "tools/metrics", "tools/pager", "tools/record", + "tools/record/util", "tools/reference", "transport", - "util/buffer", "util/cert", "util/connrotation", "util/flowcontrol", "util/homedir", - "util/integer", "util/jsonpath", + "util/keyutil", "util/retry", "util/workqueue", ] pruneopts = "UT" - revision = "b40b2a5939e43f7ffe0028ad67586b7ce50bb675" - version = "kubernetes-1.13.4" - -[[projects]] - branch = "release-1.13" - digest = "1:6a5942e71b8f97617b6984f11894f857dbe6ed3d564ad268b649802dc4c7dbcb" - name = "k8s.io/code-generator" - packages = [ - "cmd/client-gen", - "cmd/client-gen/args", - "cmd/client-gen/generators", - "cmd/client-gen/generators/fake", - "cmd/client-gen/generators/scheme", - "cmd/client-gen/generators/util", - "cmd/client-gen/path", - "cmd/client-gen/types", - "cmd/conversion-gen", - "cmd/conversion-gen/args", - "cmd/conversion-gen/generators", - "cmd/deepcopy-gen", - "cmd/deepcopy-gen/args", - "cmd/defaulter-gen", - "cmd/defaulter-gen/args", - "cmd/informer-gen", - "cmd/informer-gen/args", - "cmd/informer-gen/generators", - "cmd/lister-gen", - "cmd/lister-gen/args", - "cmd/lister-gen/generators", - "cmd/register-gen", - "cmd/register-gen/args", - "cmd/register-gen/generators", - "cmd/set-gen", - "pkg/util", - ] - pruneopts = "UT" - revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae" - -[[projects]] - branch = "master" - digest = "1:dc54d24e166e75a89145883b6080306f5684db2e8ab7a068625f010dee604c6e" - name = "k8s.io/component-base" - packages = ["cli/flag"] - pruneopts = "UT" - revision = "fd5d14dd6d20b9f3b0c066e2702d877b0b8c049f" - -[[projects]] - branch = "master" - digest = "1:4b51f480ae0b1b401eb343db1807601fd45f827ec16c4dfe3275e74fd47b78a1" - name = "k8s.io/gengo" - packages = [ - "args", - "examples/deepcopy-gen/generators", - "examples/defaulter-gen/generators", - "examples/set-gen/generators", - "examples/set-gen/sets", - "generator", - "namer", - "parser", - "types", - ] - pruneopts = "UT" - revision = "f8a0810f38afb8478882b3835a615aebfda39afa" + revision = "1a26190bd76a9017e289958b9fba936430aa3704" + version = "kubernetes-1.14.1" [[projects]] - digest = "1:b28940274ee0e7b0b9e3fdeb55d63f812023100293b7e73d26c0261077a22e2e" + digest = "1:e2999bf1bb6eddc2a6aa03fe5e6629120a53088926520ca3b4765f77d7ff7eab" name = "k8s.io/klog" - packages = [ - ".", - "klogr", - ] + packages = ["."] pruneopts = "UT" - revision = "78315d914a8af2453db4864e69230b647b1ff711" - version = "v0.3.2" + revision = "a5bc97fbc634d635061f3146511332c7e313a55a" + version = "v0.1.0" [[projects]] branch = "master" - digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f" + digest = "1:0dac10b488f2671c15738b662478d665b0c0ec7cae3362669dec172147331ce5" name = "k8s.io/kube-openapi" - packages = ["pkg/util/proto"] - pruneopts = "UT" - revision = 
"ced9eb3070a5f1c548ef46e8dfe2a97c208d9f03" - -[[projects]] - digest = "1:e06b8ce47dfcd74cf6c677eb98a10685429d9acf4f52542568f11500bd813640" - name = "sigs.k8s.io/controller-runtime" packages = [ - "pkg/cache", - "pkg/cache/internal", - "pkg/client", - "pkg/client/apiutil", - "pkg/client/config", - "pkg/client/fake", - "pkg/controller", - "pkg/envtest", - "pkg/envtest/printer", - "pkg/event", - "pkg/handler", - "pkg/internal/controller", - "pkg/internal/controller/metrics", - "pkg/internal/objectutil", - "pkg/internal/recorder", - "pkg/leaderelection", - "pkg/manager", - "pkg/metrics", - "pkg/patch", - "pkg/predicate", - "pkg/reconcile", - "pkg/recorder", - "pkg/runtime/inject", - "pkg/runtime/log", - "pkg/runtime/scheme", - "pkg/runtime/signals", - "pkg/source", - "pkg/source/internal", - "pkg/webhook/admission", - "pkg/webhook/admission/types", - "pkg/webhook/internal/metrics", - "pkg/webhook/types", + "pkg/common", + "pkg/util/proto", ] pruneopts = "UT" - revision = "f1eaba5087d69cebb154c6a48193e6667f5b512c" - version = "v0.1.12" + revision = "e3762e86a74c878ffed47484592986685639c2cd" [[projects]] - digest = "1:767ce3278b1fe82e3c713bc701b1ada60c8b9301213472c73f65a8955dec8980" - name = "sigs.k8s.io/controller-tools" + branch = "master" + digest = "1:14e8a3b53e6d8cb5f44783056b71bb2ca1ac7e333939cc97f3e50b579c920845" + name = "k8s.io/utils" packages = [ - "cmd/controller-gen", - "pkg/crd/generator", - "pkg/crd/util", - "pkg/internal/codegen", - "pkg/internal/codegen/parse", - "pkg/internal/general", - "pkg/rbac", - "pkg/util", - "pkg/webhook", + "buffer", + "integer", + "trace", ] pruneopts = "UT" - revision = "464fbed97efaced33902e262f09c5eae7315c3e3" - version = "v0.1.11" + revision = "21c4ce38f2a793ec01e925ddc31216500183b773" [[projects]] digest = "1:9070222ca967d09b3966552a161dd4420d62315964bf5e1efd8cc4c7c30ebca8" @@ -1248,84 +912,75 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ - "github.com/davecgh/go-spew/spew", "github.com/emicklei/go-restful", + "github.com/evanphx/json-patch", + "github.com/go-logr/logr", + "github.com/go-logr/logr/testing", + "github.com/go-logr/zapr", + "github.com/go-openapi/spec", "github.com/onsi/ginkgo", + "github.com/onsi/ginkgo/config", + "github.com/onsi/ginkgo/types", "github.com/onsi/gomega", - "github.com/pkg/errors", - "github.com/sergi/go-diff/diffmatchpatch", - "github.com/spf13/cobra", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/client_model/go", "github.com/spf13/pflag", - "golang.org/x/net/context", + "go.uber.org/zap", + "go.uber.org/zap/buffer", + "go.uber.org/zap/zapcore", + "gomodules.xyz/jsonpatch/v2", + "gopkg.in/fsnotify.v1", + "k8s.io/api/admission/v1beta1", "k8s.io/api/apps/v1", + "k8s.io/api/apps/v1beta1", "k8s.io/api/autoscaling/v1", "k8s.io/api/core/v1", + "k8s.io/api/extensions/v1beta1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apimachinery/pkg/api/equality", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/api/meta", "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/apis/meta/v1/validation", + "k8s.io/apimachinery/pkg/fields", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/selection", 
"k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/rand", "k8s.io/apimachinery/pkg/util/runtime", "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/apimachinery/pkg/util/validation/field", + "k8s.io/apimachinery/pkg/util/uuid", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/util/yaml", "k8s.io/apimachinery/pkg/watch", - "k8s.io/apiserver/pkg/storage/names", "k8s.io/client-go/discovery", - "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/dynamic", "k8s.io/client-go/informers", "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/kubernetes/typed/core/v1", "k8s.io/client-go/plugin/pkg/client/auth", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/rest", + "k8s.io/client-go/restmapper", "k8s.io/client-go/testing", "k8s.io/client-go/tools/cache", "k8s.io/client-go/tools/clientcmd", "k8s.io/client-go/tools/clientcmd/api", + "k8s.io/client-go/tools/leaderelection", "k8s.io/client-go/tools/leaderelection/resourcelock", + "k8s.io/client-go/tools/metrics", "k8s.io/client-go/tools/record", - "k8s.io/client-go/util/flowcontrol", - "k8s.io/client-go/util/integer", - "k8s.io/client-go/util/retry", - "k8s.io/code-generator/cmd/client-gen", - "k8s.io/code-generator/cmd/conversion-gen", - "k8s.io/code-generator/cmd/deepcopy-gen", - "k8s.io/code-generator/cmd/defaulter-gen", - "k8s.io/code-generator/cmd/informer-gen", - "k8s.io/code-generator/cmd/lister-gen", - "k8s.io/code-generator/cmd/register-gen", - "k8s.io/code-generator/cmd/set-gen", - "k8s.io/component-base/cli/flag", - "k8s.io/klog", - "k8s.io/klog/klogr", - "sigs.k8s.io/controller-runtime/pkg/client", - "sigs.k8s.io/controller-runtime/pkg/client/config", - "sigs.k8s.io/controller-runtime/pkg/client/fake", - "sigs.k8s.io/controller-runtime/pkg/controller", - "sigs.k8s.io/controller-runtime/pkg/envtest", - "sigs.k8s.io/controller-runtime/pkg/handler", - "sigs.k8s.io/controller-runtime/pkg/manager", - "sigs.k8s.io/controller-runtime/pkg/reconcile", - "sigs.k8s.io/controller-runtime/pkg/runtime/log", - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", - "sigs.k8s.io/controller-runtime/pkg/runtime/signals", - "sigs.k8s.io/controller-runtime/pkg/source", - "sigs.k8s.io/controller-tools/cmd/controller-gen", + "k8s.io/client-go/tools/reference", + "k8s.io/client-go/util/workqueue", + "k8s.io/kube-openapi/pkg/common", "sigs.k8s.io/testing_frameworks/integration", + "sigs.k8s.io/testing_frameworks/integration/addr", + "sigs.k8s.io/yaml", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml b/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml new file mode 100644 index 0000000000..81598f106b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/Gopkg.toml @@ -0,0 +1,88 @@ +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Packages required by users +# NB(directxman12): be very careful how you word these -- +# dep considers them as a dependency on the package you list +# meaning that if there's a main.go in the root package +# (like in apiextensions-apiserver), you'll pull it's deps in. +required = ["sigs.k8s.io/testing_frameworks/integration", + "k8s.io/client-go/plugin/pkg/client/auth", + "github.com/spf13/pflag", + "github.com/emicklei/go-restful", + "github.com/go-openapi/spec", + "k8s.io/kube-openapi/pkg/common", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "github.com/prometheus/client_golang/prometheus", + ] + +[[constraint]] + name = "k8s.io/api" + version = "kubernetes-1.14.1" + +[[constraint]] + name = "k8s.io/apiextensions-apiserver" + version = "kubernetes-1.14.1" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.14.1" + +[[constraint]] + name = "k8s.io/client-go" + version = "kubernetes-1.14.1" + +[[constraint]] + name = "sigs.k8s.io/testing_frameworks" + version = "v0.1.1" + +[[constraint]] + name = "github.com/onsi/ginkgo" + version = "v1.5.0" + +[[constraint]] + name = "github.com/onsi/gomega" + version = "v1.4.0" + +[[constraint]] + name = "sigs.k8s.io/yaml" + version = "1.1.0" + +[[constraint]] + name = "go.uber.org/zap" + version = "1.8.0" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = "0.9.0" + +# these are not listed explicitly until we get version tags, +# since dep doesn't like bare revision dependencies + +# [[constraint]] +# name = "github.com/go-logr/logr" +# +# [[constraint]] +# name = "github.com/go-logr/zapr" + +# For dependency below: Refer to issue https://github.com/golang/dep/issues/1799 +[[override]] +name = "gopkg.in/fsnotify.v1" +source = "https://github.com/fsnotify/fsnotify.git" +version="v1.4.7" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS b/vendor/sigs.k8s.io/controller-runtime/OWNERS new file mode 100644 index 0000000000..54ecc0f18d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md +owners: + - directxman12 + - droot + - pwittrock +approvers: + - controller-runtime-admins + - controller-runtime-maintainers +reviewers: + - controller-runtime-admins + - controller-runtime-maintainers diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES new file mode 100644 index 0000000000..40e94ca6e3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES @@ -0,0 +1,8 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md + +aliases: + controller-runtime-admins: + - directxman12 + - droot + - pwittrock + controller-runtime-maintainers: [] diff --git a/vendor/sigs.k8s.io/controller-runtime/README.md b/vendor/sigs.k8s.io/controller-runtime/README.md new file mode 100644 index 0000000000..0e02f8a9c7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/README.md @@ -0,0 +1,61 @@ +[![Build Status](https://travis-ci.org/kubernetes-sigs/controller-runtime.svg?branch=master)](https://travis-ci.org/kubernetes-sigs/controller-runtime "Travis") +[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/controller-runtime)](https://goreportcard.com/report/sigs.k8s.io/controller-runtime) + +# Kubernetes controller-runtime Project + +The Kubernetes 
controller-runtime Project is a set of go libraries for building Controllers. + +Documentation: + +- [Package overview](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg) +- [Basic controller using builder](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/builder#example-Builder) +- [Creating a manager](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#example-New) +- [Creating a controller](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller#example-New) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) + +# Versioning, Maintenance, and Compatibility + +The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR: + +Users: + +- We follow [Semantic Versioning (semver)](https://semver.org) +- Use releases with your dependency management to ensure that you get compatible code +- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) + +Contributors: + +- All code PR must be labeled with :bug: (patch fixes), :sparkles: (backwards-compatible features), or :warning: (breaking changes) +- Breaking changes will find their way into the next major release, other changes will go into an semi-immediate patch or minor release +- For a quick PR template suggesting the right information, use one of these PR templates: + * [Breaking Changes/Features](/.github/PULL_REQUEST_TEMPLATE/breaking_change.md) + * [Backwards-Compatible Features](/.github/PULL_REQUEST_TEMPLATE/compat_feature.md) + * [Bug fixes](/.github/PULL_REQUEST_TEMPLATE/bug_fix.md) + * [Documentation Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md) + * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md) + +## FAQ + +See [FAQ.md](FAQ.md) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +controller-runtime is a subproject of the [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) project +in sig apimachinery. + +You can reach the maintainers of this project at: + +- Slack channel: [#kubebuilder](http://slack.k8s.io/#kubebuilder) +- Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder) + +## Contributing +Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers. +The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details. +Before starting any work, please either comment on an existing issue, or file a new one. + +## Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + diff --git a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS new file mode 100644 index 0000000000..6f826fe021 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS @@ -0,0 +1,15 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. 
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+directxman12
+pwittrock
+droot
diff --git a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md
new file mode 100644
index 0000000000..17860350c6
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md
@@ -0,0 +1,166 @@
+Logging Guidelines
+==================
+
+controller-runtime uses a kind of logging called *structured logging*. If
+you've used a library like Zap or logrus before, you'll be familiar with
+the concepts we use. If you've only used a logging library like the "log"
+package (in the Go standard library) or "glog" (in Kubernetes), you'll
+need to adjust how you think about logging a bit.
+
+### Getting Started With Structured Logging
+
+With structured logging, we associate a *constant* log message with some
+variable key-value pairs. For instance, suppose we wanted to log that we
+were starting reconciliation on a pod. In the Go standard library logger,
+we might write:
+
+```go
+log.Printf("starting reconciliation for pod %s/%s", podNamespace, podName)
+```
+
+In controller-runtime, we'd instead write:
+
+```go
+logger.Info("starting reconciliation", "pod", req.NamespacedName)
+```
+
+or even write
+
+```go
+func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ logger := logger.WithValues("pod", req.NamespacedName)
+ // do some stuff
+ logger.Info("starting reconciliation")
+}
+```
+
+Notice how we've broken out the information that we want to convey into
+a constant message (`"starting reconciliation"`) and some key-value pairs
+that convey variable information (`"pod", req.NamespacedName`). We've
+thereby added "structure" to our logs, which makes them easier to save
+and search later, as well as correlate with metrics and events.
+
+All of controller-runtime's logging is done via
+[logr](https://github.com/go-logr/logr), a generic interface for
+structured logging. You can use whichever logging library you want to
+implement the actual mechanics of the logging. controller-runtime
+provides some helpers to make it easy to use
+[Zap](https://go.uber.org/zap) as the implementation.
+
+You can configure the logging implementation using
+`"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That
+package also contains the convenience functions for setting up Zap.
+
+You can get a handle to the "root" logger using
+`"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call
+`WithName` to create individual named loggers. You can call `WithName`
+repeatedly to chain names together:
+
+```go
+logger := log.Log.WithName("controller").WithName("replicaset")
+// in reconcile...
+logger = logger.WithValues("replicaset", req.NamespacedName)
+// later on in reconcile...
+logger.Info("doing things with pods", "pod", newPod)
+```
+
+As seen above, you can also call `WithValues` to create a new sub-logger
+that always attaches some key-value pairs to a logger.
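[Editor's note] The paragraphs above mention `SetLogger`, the root `Log`, and the Zap helpers without showing them wired together. A minimal sketch, assuming the `zap.Logger(development bool)` convenience helper that ships in this vendored version's `pkg/log/zap` package (the same call used by kubebuilder v2-era scaffolding):

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Install a Zap-backed logr.Logger as the root logger. Every logger
	// derived from ctrl.Log (or log.Log) delegates to it from now on.
	ctrl.SetLogger(zap.Logger(true)) // true = development mode, human-readable output

	setupLog := ctrl.Log.WithName("setup")
	setupLog.Info("logger configured", "development mode", true)
}
```

Until `SetLogger` is called, loggers obtained from `log.Log` are deferred and produce no output, which is why the root logger is usually configured first thing in `main`.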
+ +Finally, you can use `V(1)` to mark a particular log line as "debug" logs: + +```go +logger.V(1).Info("this is particularly verbose!", "state of the world", +allKubernetesObjectsEverywhere) +``` + +While it's possible to use higher log levels, it's recommended that you +stick with `V(1)` or V(0)` (which is equivalent to not specifying `V`), +and then filter later based on key-value pairs or messages; different +numbers tend to lose meaning easily over time, and you'll be left +wondering why particular logs lines are at `V(5)` instead of `V(7)`. + +## Logging errors + +Errors should *always* be logged with `log.Error`, which allows logr +implementations to provide special handling of errors (for instance, +providing stack traces in debug mode). + +It's acceptable to log call `log.Error` with a nil error object. This +conveys that an error occurred in some capacity, but that no actual +`error` object was involved. + +## Logging messages + +- Don't put variable content in your messages -- use key-value pairs for + that. Never use `fmt.Sprintf` in your message. + +- Try to match the terminology in your messages with your key-value pairs + -- for instance, if you have a key-value pairs `api version`, use the + term `APIVersion` instead of `GroupVersion` in your message. + +## Logging Kubernetes Objects + +Kubernetes objects should be logged directly, like `log.Info("this is +a Kubernetes object", "pod", somePod)`. controller-runtime provides +a special encoder for Zap that will transform Kubernetes objects into +`name, namespace, apiVersion, kind` objects, when available and not in +development mode. Other logr implementations should implement similar +logic. + +## Logging Structured Values (Key-Value pairs) + +- Use lower-case, space separated keys. For example `object` for objects, + `api version` for `APIVersion` + +- Be consistent across your application, and with controller-runtime when + possible. + +- Try to be brief but descriptive. + +- Match terminology in keys with terminology in the message. + +- Be careful logging non-Kubernetes objects verbatim if they're very + large. + +### Groups, Versions, and Kinds + +- Kinds should not be logged alone (they're meaningless alone). Use + a `GroupKind` object to log them instead, or a `GroupVersionKind` when + version is relevant. + +- If you need to log an API version string, use `api version` as the key + (formatted as with a `GroupVersion`, or as received directly from API + discovery). + +### Objects and Types + +- If code works with a generic Kubernetes `runtime.Object`, use the + `object` key. For specific objects, prefer the resource name as the key + (e.g. `pod` for `v1.Pod` objects). + +- For non-Kubernetes objects, the `object` key may also be used, if you + accept a generic interface. + +- When logging a raw type, log it using the `type` key, with a value of + `fmt.Sprintf("%T", typ)` + +- If there's specific context around a type, the key may be more specific, + but should end with `type` -- for instance, `OwnerType` should be logged + as `owner` in the context of `log.Error(err, "Could not get ObjectKinds + for OwnerType", `owner type`, fmt.Sprintf("%T"))`. When possible, favor + communicating kind instead. + +### Multiple things + +- When logging multiple things, simply pluralize the key. + +### controller-runtime Specifics + +- Reconcile requests should be logged as `request`, although normal code + should favor logging the key. 
+ +- Reconcile keys should be logged as with the same key as if you were + logging the object directly (e.g. `log.Info("reconciling pod", "pod", + req.NamespacedName)`). This ends up having a similar effect to logging + the object directly. diff --git a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md new file mode 100644 index 0000000000..55fb5d7ff4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md @@ -0,0 +1,271 @@ +# Versioning and Branching in controller-runtime + +*NB*: this also applies to controller-tools. + +## TL;DR: + +### Users + +- We follow [Semantic Versioning (semver)](https://semver.org) +- Use releases with your dependency management to ensure that you get + compatible code +- The master branch contains all the latest code, some of which may break + compatibility (so "normal" `go get` is not recommended) + +### Contributors + +- All code PR must be labeled with :bug: (patch fixes), :sparkles: + (backwards-compatible features), or :warning: (breaking changes) + +- Breaking changes will find their way into the next major release, other + changes will go into an semi-immediate patch or minor release + +- Please *try* to avoid breaking changes when you can. They make users + face difficult decisions ("when do I go through the pain of + upgrading?"), and make life hard for maintainers and contributors + (dealing with differences on stable branches). + +### Mantainers + +Don't be lazy, read the rest of this doc :-) + +## Overview + +controller-runtime (and friends) follow [Semantic +Versioning](https://semver.org). I'd recommend reading the aforementioned +link if you're not familiar, but essentially, for any given release X.Y.Z: + +- an X (*major*) release indicates a set of backwards-compatible code. + Changing X means there's a breaking change. + +- a Y (*minor*) release indicates a minimum feature set. Changing Y means + the addition of a backwards-compatible feature. + +- a Z (*patch*) release indicates minimum set of bugfixes. Changing + Z means a backwards-compatible change that doesn't add functionality. + +*NB*: If the major release is `0`, any minor release may contain breaking +changes. + +These guarantees extend to all code exposed in public APIs of +controller-runtime. This includes code both in controller-runtime itself, +*plus types from dependencies in public APIs*. Types and functions not in +public APIs are not considered part of the guarantee. + +In order to easily maintain the guarantees, we have a couple of processes +that we follow. + +## Branches + +controller-runtime contains two types of branches: the *master* branch and +*release-X* branches. + +The *master* branch is where development happens. All the latest and +greatest code, including breaking changes, happens on master. + +The *release-X* branches contain stable, backwards compatible code. Every +major (X) release, a new such branch is created. It is from these +branches that minor and patch releases are tagged. If some cases, it may +be necessary open PRs for bugfixes directly against stable branches, but +this should generally not be the case. + +The maintainers are responsible for updating the contents of this branch; +generally, this is done just before a release using release tooling that +filters and checks for changes tagged as breaking (see below). 
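[Editor's note] The TL;DR above asks users to consume tagged releases through their dependency management rather than tracking master. With Go modules that simply means pinning a release tag in `go.mod`; the version shown below is purely illustrative:

```
require sigs.k8s.io/controller-runtime v0.2.0
```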
+ +### Tooling + +* [release-notes.sh](hack/release-notes.sh): generate release notes + for a range of commits, and check for next version type (***TODO***) + +* [verify-commit-messages.sh](hack/verify-commit-messages.sh): check that + your PR and/or commit messages have the right versioning icon + (***TODO***). + +## PR Process + +Every PR should be annotated with an icon indicating whether it's +a: + +- Breaking change: :warning: (`:warning:`) +- Non-breaking feature: :sparkles: (`:sparkles:`) +- Patch fix: :bug: (`:bug:`) +- Docs: :book: (`:book:`) +- Infra/Tests/Other: :running: (`:running:`) +- No release note: :ghost: (`:ghost:`) + +Use :ghost: (no release note) only for the PRs that change or revert unreleased +changes, which don't deserve a release note. Please don't abuse it. + +You can also use the equivalent emoji directly, since GitHub doesn't +render the `:xyz:` aliases in PR titles. + +Individual commits should not be tagged separately, but will generally be +assumed to match the PR. For instance, if you have a bugfix in with +a breaking change, it's generally encouraged to submit the bugfix +separately, but if you must put them in one PR, mark the commit +separately. + +### Commands and Workflow + +controller-runtime follows the standard Kubernetes workflow: any PR needs +`lgtm` and `approved` labels, PRs authors must have signed the CNCF CLA, +and PRs must pass the tests before being merged. See [the contributor +docs](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#the-testing-and-merge-workflow) +for more info. + +We use the same priority and kind labels as Kubernetes. See the labels +tab in GitHub for the full list. + +The standard Kubernetes comment commands should work in +controller-runtime. See [Prow](https://prow.k8s.io/command-help) for +a command reference. + +## Release Process + +Minor and patch releases are generally done immediately after a feature or +bugfix is landed, or sometimes a series of features tied together. + +Minor releases will only be tagged on the *most recent* major release +branch, except in exceptional circumstances. Patches will be backported +to maintained stable versions, as needed. + +Major releases are done shortly after a breaking change is merged -- once +a breaking change is merged, the next release *must* be a major revision. +We don't intend to have a lot of these, so we may put off merging breaking +PRs until a later date. + +### Exact Steps + +Follow the release-specific steps below, then follow the general steps +after that. + +#### Minor and patch releases + +1. Update the release-X branch with the latest set of changes by calling + `git rebase master` from the release branch. + +#### Major releases + +1. Create a new release branch named `release-X` (where `X` is the new + version) off of master. + +#### General + +2. Generate release notes using the release note tooling. + +3. Add a release for controller-runtime on GitHub, using those release + notes, with a title of `vX.Y.Z`. + +4. Do a similar process for + [controller-tools](https://github.com/kubernetes-sigs/controller-tools) + +5. Announce the release in `#kubebuilder` on Slack with a pinned message. + +6. Potentially update + [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) as well. + +### Breaking Changes + +Try to avoid breaking changes. 
They make life difficult for users, who +have to rewrite their code when they eventually upgrade, and for +maintainers/contributors, who have to deal with differences between master +and stable branches. + +That being said, we'll occasionally want to make breaking changes. They'll +be merged onto master, and will then trigger a major release (see [Release +Process](#release-process)). Because breaking changes induce a major +revision, the maintainers may delay a particular breaking change until +a later date when they are ready to make a major revision with a few +breaking changes. + +If you're going to make a breaking change, please make sure to explain in +detail why it's helpful. Is it necessary to cleanly resolve an issue? +Does it improve API ergonomics? + +Maintainers should treat breaking changes with caution, and evaluate +potential non-breaking solutions (see below). + +Note that API breakage in public APIs due to dependencies will trigger +a major revision, so you may occasionally need to have a major release +anyway, due to changes in libraries like `k8s.io/client-go` or +`k8s.io/apimachinery`. + +*NB*: Pre-1.0 releases treat breaking changes a bit more lightly. We'll +still consider carefully, but the pre-1.0 timeframe is useful for +converging on a ergonomic API. + +#### Avoiding breaking changes + +##### Solutions to avoid + +- **Confusingly duplicate methods, functions, or variables.** + + For instance, suppose we have an interface method `List(ctx + context.Context, options *ListOptions, obj runtime.Object) error`, and + we decide to switch it so that options come at the end, parametrically. + Adding a new interface method `ListParametric(ctx context.Context, obj + runtime.Object, options... ListOption)` is probably not the right + solution: + + - Users will intuitively see `List`, and use that in new projects, even + if it's marked as deprecated. + + - Users who don't notice the deprecation may be confused as to the + difference between `List` and `ListParametric`. + + - It's not immediately obvious in isolation (e.g. in surrounding code) + why the method is called `ListParametric`, and may cause confusion + when reading code that makes use of that method. + + In this case, it may be better to make the breaking change, and then + eventually do a major release. + +## Why don't we... + +### Use "next"-style branches + +Development branches: + +- don't win us much in terms of maintenance in the case of breaking + changes (we still have to merge/manage multiple branches for development + and stable) + +- can be confusing to contributors, who often expect master to have the + latest changes. + +### Never break compatibility + +Never doing a new major release could be an admirable goal, but gradually +leads to API cruft. + +Since one of the goals of controller-runtime is to be a friendly and +intuitive API, we want to avoid too much API cruft over time, and +occaisonal breaking changes in major releases help accomplish that goal. + +Furthermore, our dependency on Kubernetes libraries makes this difficult +(see below) + +### Always assume we've broken compatibility + +*a.k.a. k8s.io/client-go style* + +While this makes life easier (a bit) for maintainers, it's problematic for +users. While breaking changes arrive sooner, upgrading becomes very +painful. + +Furthermore, we still have to maintain stable branches for bugfixes, so +the maintenance burden isn't lessened by a ton. 
+ +### Extend compatibility guarantees to all dependencies + +This is very difficult with the number of Kubernetes dependencies we have. +Kubernetes dependencies tend to either break compatibility every major +release (e.g. k8s.io/client-go, which loosely follows semver), or at +a whim (many other Kubernetes libraries). + +If we limit to the few objects we expose, we can better inform users about +how *controller-runtime itself* has changed in a given release. Then, +users can make informed decisions about how to proceed with any direct +uses of Kubernetes dependencies their controller-runtime-based application +may have. diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go new file mode 100644 index 0000000000..af955ad301 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerruntime + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Builder builds an Application ControllerManagedBy (e.g. Operator) and returns a manager.Manager to start it. +type Builder = builder.Builder + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request = reconcile.Request + +// Result contains the result of a Reconciler invocation. +type Result = reconcile.Result + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager = manager.Manager + +// Options are the arguments for creating a new Manager +type Options = manager.Options + +// SchemeBuilder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type SchemeBuilder = scheme.Builder + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion = schema.GroupVersion + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupResource = schema.GroupResource + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. 
+// +// +k8s:deepcopy-gen=false +type TypeMeta = metav1.TypeMeta + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta = metav1.ObjectMeta + +var ( + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Will log an error and exit if there is an error creating the rest.Config. + GetConfigOrDie = config.GetConfigOrDie + + // GetConfig creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Config precedence + // + // * --kubeconfig flag pointing at a file + // + // * KUBECONFIG environment variable pointing at a file + // + // * In-cluster config if running in cluster + // + // * $HOME/.kube/config if exists + GetConfig = config.GetConfig + + // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager + NewControllerManagedBy = builder.ControllerManagedBy + + // NewWebhookManagedBy returns a new webhook builder that will be started by the provided Manager + NewWebhookManagedBy = builder.WebhookManagedBy + + // NewManager returns a new Manager for creating Controllers. + NewManager = manager.New + + // CreateOrUpdate creates or updates the given object obj in the Kubernetes + // cluster. The object's desired state should be reconciled with the existing + // state using the passed in ReconcileFn. obj must be a struct pointer so that + // obj can be updated with the content returned by the Server. + // + // It returns the executed operation and an error. + CreateOrUpdate = controllerutil.CreateOrUpdate + + // SetControllerReference sets owner as a Controller OwnerReference on owned. + // This is used for garbage collection of the owned object and for + // reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). + // Since only one OwnerReference can be a controller, it returns an error if + // there is another OwnerReference with Controller flag set. + SetControllerReference = controllerutil.SetControllerReference + + // SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned + // which is closed on one of these signals. If a second signal is caught, the program + // is terminated with exit code 1. + SetupSignalHandler = signals.SetupSignalHandler + + // Log is the base logger used by controller-runtime. It delegates + // to another logr.Logger. You *must* call SetLogger to + // get any actual logging. + Log = log.Log + + // SetLogger sets a concrete logging implementation for all deferred Loggers. 
+ SetLogger = log.SetLogger +) diff --git a/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md b/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/controller-runtime/doc.go b/vendor/sigs.k8s.io/controller-runtime/doc.go new file mode 100644 index 0000000000..d90970796e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/doc.go @@ -0,0 +1,124 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controllerruntime provides tools to construct Kubernetes-style +// controllers that manipulate both Kubernetes CRDs and aggregated/built-in +// Kubernetes APIs. +// +// It defines easy helpers for the common use cases when building CRDs, built +// on top of customizable layers of abstraction. Common cases should be easy, +// and uncommon cases should be possible. In general, controller-runtime tries +// to guide users towards Kubernetes controller best-practices. +// +// Getting Started +// +// The main entrypoint for controller-runtime is this root package, which +// contains all of the common types needed to get started building controllers: +// import ( +// controllers "sigs.k8s.io/controller-runtime" +// ) +// +// The examples in this package walk through a basic controller setup. The +// kubebuilder book (https://book.kubebuilder.io) has some more in-depth +// walkthroughs. +// +// controller-runtime favors structs with sane defaults over constructors, so +// it's fairly common to see structs being used directly in controller-runtime. +// +// Organization +// +// A brief-ish walkthrough of the layout of this library can be found below. Each +// package contains more information about how to use it. +// +// Frequently asked questions about using controller-runtime and designing +// controllers can be found at +// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// +// Managers +// +// Every controller and webhook is ultimately run by a Manager (pkg/manager). A +// manager is responsible for running controllers and webhooks, and setting up +// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// well as managing leader election (pkg/leaderelection). Managers are +// generally configured to gracefully shut down controllers on pod termination +// by wiring up a signal handler (pkg/manager/signals). +// +// Controllers +// +// Controllers (pkg/controller) use events (pkg/events) to eventually trigger +// reconcile requests. 
They may be constructed manually, but are often +// constructed with a Builder (pkg/builder), which eases the wiring of event +// sources (pkg/source), like Kubernetes API object changes, to event handlers +// (pkg/handler), like "enqueue a reconcile request for the object owner". +// Predicates (pkg/predicate) can be used to filter which events actually +// trigger reconciles. There are pre-written utilities for the common cases, and +// interfaces and helpers for advanced cases. +// +// Reconcilers +// +// Controller logic is implemented in terms of Reconcilers (pkg/reconcile). A +// Reconciler implements a function which takes a reconcile Request containing +// the name and namespace of the object to reconcile, reconciles the object, +// and returns a Response or an error indicating whether to requeue for a +// second round of processing. +// +// Clients and Caches +// +// Reconcilers use Clients (pkg/client) to access API objects. The default +// client provided by the manager reads from a local shared cache (pkg/cache) +// and writes directly to the API server, but clients can be constructed that +// only talk to the API server, without a cache. The Cache will auto-populate +// with watched objects, as well as when other structured objects are +// requested. Caches may also have indexes, which can be created via a +// FieldIndexer (pkg/client) obtained from the manager. Indexes can used to +// quickly and easily look up all objects with certain fields set. Reconcilers +// may retrieve event recorders (pkg/recorder) to emit events using the +// manager. +// +// Schemes +// +// Clients, Caches, and many other things in Kubernetes use Schemes +// (pkg/scheme) to associate Go types to Kubernetes API Kinds +// (Group-Version-Kinds, to be specific). +// +// Webhooks +// +// Similarly, webhooks (pkg/webhook/admission) may be implemented directly, but +// are often constructed using a builder (pkg/webhook/admission/builder). They +// are run via a server (pkg/webhook) which is managed by a Manager. +// +// Logging and Metrics +// +// Logging (pkg/log) in controller-runtime is done via structured logs, using a +// log set of interfaces called logr +// (https://godoc.org/github.com/go-logr/logr). While controller-runtime +// provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap), +// you can provide any implementation of logr as the base logger for +// controller-runtime. +// +// Metrics (pkg/metrics) provided by controller-runtime are registered into a +// controller-runtime-specific Prometheus metrics registry. The manager can +// serve these by an HTTP endpoint, and additional metrics may be registered to +// this Registry as normal. +// +// Testing +// +// You can easily build integration and unit tests for your controllers and +// webhooks using the test Environment (pkg/envtest). This will automatically +// stand up a copy of etcd and kube-apiserver, and provide the correct options +// to connect to the API server. It's designed to work well with the Ginkgo +// testing framework, but should work with any testing setup. 
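[Editor's note] The package walkthrough above is easier to absorb next to a concrete, minimal program. The sketch below is not part of the vendored documentation: it wires a manager, the builder, and a reconciler together using the pre-context `Reconcile` signature of this controller-runtime version; `PodReconciler`, the `zap.Logger` helper, and the bare `os.Exit` error handling are illustrative assumptions. Everything else comes from the root-package aliases defined in `alias.go` earlier in this patch.

```go
package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// PodReconciler is a hypothetical Reconciler used only for illustration.
type PodReconciler struct {
	client.Client
}

// Reconcile fetches the Pod named in the request, using the pre-context
// signature of this controller-runtime version.
func (r *PodReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
	var pod corev1.Pod
	if err := r.Get(context.TODO(), req.NamespacedName, &pod); err != nil {
		if apierrors.IsNotFound(err) {
			// The object was deleted; nothing left to do.
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}

func main() {
	ctrl.SetLogger(zap.Logger(true))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}

	// The builder watches Pods and enqueues reconcile requests for them.
	if err := ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}).
		Complete(&PodReconciler{Client: mgr.GetClient()}); err != nil {
		os.Exit(1)
	}

	// Start blocks until the stop channel (closed on SIGTERM/SIGINT) fires.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
```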
+package controllerruntime diff --git a/vendor/sigs.k8s.io/controller-runtime/go.mod b/vendor/sigs.k8s.io/controller-runtime/go.mod new file mode 100644 index 0000000000..928622f224 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/go.mod @@ -0,0 +1,50 @@ +module sigs.k8s.io/controller-runtime + +go 1.11 + +require ( + cloud.google.com/go v0.26.0 // indirect + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect + github.com/evanphx/json-patch v4.5.0+incompatible + github.com/go-logr/logr v0.1.0 + github.com/go-logr/zapr v0.1.0 + github.com/gogo/protobuf v1.1.1 // indirect + github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect + github.com/googleapis/gnostic v0.2.0 // indirect + github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/json-iterator/go v1.1.5 // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect + github.com/onsi/ginkgo v1.6.0 + github.com/onsi/gomega v1.4.2 + github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c // indirect + github.com/prometheus/client_golang v0.9.0 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect + github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect + github.com/spf13/pflag v1.0.2 // indirect + go.uber.org/atomic v1.3.2 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.9.1 + golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be // indirect + golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect + golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect + gomodules.xyz/jsonpatch/v2 v2.0.0 + google.golang.org/appengine v1.1.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/fsnotify.v1 v1.4.7 + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b + k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 + k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d + k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible + k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c // indirect + k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect + sigs.k8s.io/testing_frameworks v0.1.1 + sigs.k8s.io/yaml v1.1.0 +) diff --git a/vendor/sigs.k8s.io/controller-runtime/go.sum b/vendor/sigs.k8s.io/controller-runtime/go.sum new file mode 100644 index 0000000000..e092650230 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/go.sum @@ -0,0 +1,122 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +gomodules.xyz/jsonpatch/v2 v2.0.0 h1:OyHbl+7IOECpPKfVK42oFr6N7+Y2dR+Jsb/IiDV3hOo= +gomodules.xyz/jsonpatch/v2 v2.0.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= +k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 
h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= +sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go new file mode 100644 index 0000000000..c3f5424688 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -0,0 +1,215 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Supporting mocking out functions for testing +var newController = controller.New +var getGvk = apiutil.GVKForObject + +// Builder builds a Controller. +type Builder struct { + apiType runtime.Object + mgr manager.Manager + predicates []predicate.Predicate + managedObjects []runtime.Object + watchRequest []watchRequest + config *rest.Config + ctrl controller.Controller + ctrlOptions controller.Options + name string +} + +// ControllerManagedBy returns a new controller builder that will be started by the provided Manager +func ControllerManagedBy(m manager.Manager) *Builder { + return &Builder{mgr: m} +} + +// ForType defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object*. +// This is the equivalent of calling +// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) +// +// Deprecated: Use For +func (blder *Builder) ForType(apiType runtime.Object) *Builder { + return blder.For(apiType) +} + +// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object*. +// This is the equivalent of calling +// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) +func (blder *Builder) For(apiType runtime.Object) *Builder { + blder.apiType = apiType + return blder +} + +// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to +// create / delete / update events by *reconciling the owner object*. 
This is the equivalent of calling +// Watches(&handler.EnqueueRequestForOwner{&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}) +func (blder *Builder) Owns(apiType runtime.Object) *Builder { + blder.managedObjects = append(blder.managedObjects, apiType) + return blder +} + +type watchRequest struct { + src source.Source + eventhandler handler.EventHandler +} + +// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using +// Owns or For instead of Watches directly. +func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler) *Builder { + blder.watchRequest = append(blder.watchRequest, watchRequest{src: src, eventhandler: eventhandler}) + return blder +} + +// WithConfig sets the Config to use for configuring clients. Defaults to the in-cluster config or to ~/.kube/config. +// +// Deprecated: Use ControllerManagedBy(Manager) and this isn't needed. +func (blder *Builder) WithConfig(config *rest.Config) *Builder { + blder.config = config + return blder +} + +// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually +// trigger reconciliations. For example, filtering on whether the resource version has changed. +// Defaults to the empty list. +func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { + blder.predicates = append(blder.predicates, p) + return blder +} + +// WithOptions overrides the controller options use in doController. Defaults to empty. +func (blder *Builder) WithOptions(options controller.Options) *Builder { + blder.ctrlOptions = options + return blder +} + +// Named sets the name of the controller to the given name. The name shows up +// in metrics, among other things, and thus should be a prometheus compatible name +// (underscores and alphanumeric characters only). +// +// By default, controllers are named using the lowercase version of their kind. +func (blder *Builder) Named(name string) *Builder { + blder.name = name + return blder +} + +// Complete builds the Application ControllerManagedBy. +func (blder *Builder) Complete(r reconcile.Reconciler) error { + _, err := blder.Build(r) + return err +} + +// Build builds the Application ControllerManagedBy and returns the Controller it created. +func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) { + if r == nil { + return nil, fmt.Errorf("must provide a non-nil Reconciler") + } + if blder.mgr == nil { + return nil, fmt.Errorf("must provide a non-nil Manager") + } + + // Set the Config + blder.loadRestConfig() + + // Set the ControllerManagedBy + if err := blder.doController(r); err != nil { + return nil, err + } + + // Set the Watch + if err := blder.doWatch(); err != nil { + return nil, err + } + + return blder.ctrl, nil +} + +func (blder *Builder) doWatch() error { + // Reconcile type + src := &source.Kind{Type: blder.apiType} + hdler := &handler.EnqueueRequestForObject{} + err := blder.ctrl.Watch(src, hdler, blder.predicates...) 
+ if err != nil { + return err + } + + // Watches the managed types + for _, obj := range blder.managedObjects { + src := &source.Kind{Type: obj} + hdler := &handler.EnqueueRequestForOwner{ + OwnerType: blder.apiType, + IsController: true, + } + if err := blder.ctrl.Watch(src, hdler, blder.predicates...); err != nil { + return err + } + } + + // Do the watch requests + for _, w := range blder.watchRequest { + if err := blder.ctrl.Watch(w.src, w.eventhandler, blder.predicates...); err != nil { + return err + } + + } + return nil +} + +func (blder *Builder) loadRestConfig() { + if blder.config == nil { + blder.config = blder.mgr.GetConfig() + } +} + +func (blder *Builder) getControllerName() (string, error) { + if blder.name != "" { + return blder.name, nil + } + gvk, err := getGvk(blder.apiType, blder.mgr.GetScheme()) + if err != nil { + return "", err + } + return strings.ToLower(gvk.Kind), nil +} + +func (blder *Builder) doController(r reconcile.Reconciler) error { + name, err := blder.getControllerName() + if err != nil { + return err + } + ctrlOptions := blder.ctrlOptions + ctrlOptions.Reconciler = r + blder.ctrl, err = newController(name, blder.mgr, ctrlOptions) + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go new file mode 100644 index 0000000000..09126576b2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package builder provides wraps other controller-runtime libraries and exposes simple +// patterns for building common Controllers. +// +// Projects built with the builder package can trivially be rebased on top of the underlying +// packages if the project requires more customized behavior in the future. +package builder + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("builder") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go new file mode 100644 index 0000000000..ada4ddd239 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go @@ -0,0 +1,166 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + "net/http" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +// WebhookBuilder builds a Webhook. +type WebhookBuilder struct { + apiType runtime.Object + gvk schema.GroupVersionKind + mgr manager.Manager + config *rest.Config +} + +func WebhookManagedBy(m manager.Manager) *WebhookBuilder { + return &WebhookBuilder{mgr: m} +} + +// TODO(droot): update the GoDoc for conversion. + +// For takes a runtime.Object which should be a CR. +// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type. +// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type. +func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder { + blder.apiType = apiType + return blder +} + +// Complete builds the webhook. +func (blder *WebhookBuilder) Complete() error { + // Set the Config + blder.loadRestConfig() + + // Set the Webhook if needed + return blder.registerWebhooks() +} + +func (blder *WebhookBuilder) loadRestConfig() { + if blder.config == nil { + blder.config = blder.mgr.GetConfig() + } +} + +func (blder *WebhookBuilder) registerWebhooks() error { + // Create webhook(s) for each type + var err error + blder.gvk, err = apiutil.GVKForObject(blder.apiType, blder.mgr.GetScheme()) + if err != nil { + return err + } + + blder.registerDefaultingWebhook() + blder.registerValidatingWebhook() + + err = blder.registerConversionWebhook() + if err != nil { + return err + } + return nil +} + +// registerDefaultingWebhook registers a defaulting webhook if th +func (blder *WebhookBuilder) registerDefaultingWebhook() { + defaulter, isDefaulter := blder.apiType.(admission.Defaulter) + if !isDefaulter { + log.Info("skip registering a mutating webhook, admission.Defaulter interface is not implemented", "GVK", blder.gvk) + return + } + mwh := admission.DefaultingWebhookFor(defaulter) + if mwh != nil { + path := generateMutatePath(blder.gvk) + + // Checking if the path is already registered. + // If so, just skip it. + if !blder.isAlreadyHandled(path) { + log.Info("Registering a mutating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, mwh) + } + } +} + +func (blder *WebhookBuilder) registerValidatingWebhook() { + validator, isValidator := blder.apiType.(admission.Validator) + if !isValidator { + log.Info("skip registering a validating webhook, admission.Validator interface is not implemented", "GVK", blder.gvk) + return + } + vwh := admission.ValidatingWebhookFor(validator) + if vwh != nil { + path := generateValidatePath(blder.gvk) + + // Checking if the path is already registered. + // If so, just skip it. 
+ if !blder.isAlreadyHandled(path) { + log.Info("Registering a validating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, vwh) + } + } +} + +func (blder *WebhookBuilder) registerConversionWebhook() error { + ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType) + if err != nil { + log.Error(err, "conversion check failed", "object", blder.apiType) + return err + } + if ok { + if !blder.isAlreadyHandled("/convert") { + blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + } + log.Info("conversion webhook enabled", "object", blder.apiType) + } + + return nil +} + +func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { + if blder.mgr.GetWebhookServer().WebhookMux == nil { + return false + } + h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + if p == path && h != nil { + return true + } + return false +} + +func generateMutatePath(gvk schema.GroupVersionKind) string { + return "/mutate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} + +func generateValidatePath(gvk schema.GroupVersionKind) string { + return "/validate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index cb87d8f385..4903ab5601 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -29,30 +29,33 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.KBLog.WithName("object-cache") +var log = logf.RuntimeLog.WithName("object-cache") -// Cache implements CacheReader by reading objects from a cache populated by InformersMap +// Cache knows how to load Kubernetes objects, fetch informers to request +// to receive events for Kubernetes objects (at a low-level), +// and add indices to fields on the objects stored in the cache. type Cache interface { - // Cache implements the client CacheReader + // Cache acts as a client to objects stored in the cache. client.Reader - // Cache implements InformersMap + // Cache loads informers and adds field indices. Informers } -// Informers knows how to create or fetch informers for different group-version-kinds. -// It's safe to call GetInformer from multiple threads. +// Informers knows how to create or fetch informers for different +// group-version-kinds, and add indices to those informers. It's safe to call +// GetInformer from multiple threads. type Informers interface { // GetInformer fetches or constructs an informer for the given object that corresponds to a single // API kind and resource. - GetInformer(obj runtime.Object) (toolscache.SharedIndexInformer, error) + GetInformer(obj runtime.Object) (Informer, error) // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead // of the underlying object. - GetInformerForKind(gvk schema.GroupVersionKind) (toolscache.SharedIndexInformer, error) + GetInformerForKind(gvk schema.GroupVersionKind) (Informer, error) // Start runs all the informers known to this cache until the given channel is closed. // It blocks. 
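[Editor's note] The index plumbing that this hunk moves from `Informers` onto `client.FieldIndexer` is easiest to see end to end: register an extractor against the manager's cache, then query the cache-backed client with a field selector. A sketch, assuming this version's pre-context `IndexField` signature and the `MatchingFields` list option; the `spec.nodeName` key is just an illustrative index name:

```go
package indexexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// setupNodeNameIndex registers a field index on the manager's cache so that
// cached Pods can later be listed by the node they are scheduled onto.
func setupNodeNameIndex(mgr ctrl.Manager) error {
	return mgr.GetFieldIndexer().IndexField(&corev1.Pod{}, "spec.nodeName",
		func(obj runtime.Object) []string {
			pod := obj.(*corev1.Pod)
			return []string{pod.Spec.NodeName}
		})
}

// podsOnNode lists only the Pods indexed under the given node name, served
// from the informer cache rather than the API server.
func podsOnNode(c client.Client, node string) (*corev1.PodList, error) {
	var pods corev1.PodList
	err := c.List(context.TODO(), &pods, client.MatchingFields{"spec.nodeName": node})
	return &pods, err
}
```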
@@ -61,12 +64,25 @@ type Informers interface { // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. WaitForCacheSync(stop <-chan struct{}) bool - // IndexField adds an index with the given field name on the given object type - // by using the given function to extract the value for that field. If you want - // compatibility with the Kubernetes API server, only return one key, and only use - // fields that the API server supports. Otherwise, you can return multiple keys, - // and "equality" in the field selector means that at least one key matches the value. - IndexField(obj runtime.Object, field string, extractValue client.IndexerFunc) error + // Informers knows how to add indices to the caches (informers) that it manages. + client.FieldIndexer +} + +// Informer - informer allows you interact with the underlying informer +type Informer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler toolscache.ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(indexers toolscache.Indexers) error + //HasSynced return true if the informers underlying store has synced + HasSynced() bool } // Options are the optional arguments for creating a new InformersMap object @@ -87,7 +103,7 @@ type Options struct { var defaultResyncTime = 10 * time.Hour -// New initializes and returns a new Cache +// New initializes and returns a new Cache. func New(config *rest.Config, opts Options) (Cache, error) { opts, err := defaultOpts(config, opts) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go new file mode 100644 index 0000000000..e1742ac0f3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides object caches that act as caching client.Reader +// instances and help drive Kubernetes-object-based event handlers. 
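Editor's aside: the narrowed Informer return type above still allows callers to hang extra event handlers off the cache's shared informers. A short sketch under that assumption (the Pod type and handler body are illustrative only):

package example

import (
	corev1 "k8s.io/api/core/v1"
	toolscache "k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func watchPods(mgr manager.Manager) error {
	// GetInformer now returns the narrow cache.Informer interface instead of
	// a SharedIndexInformer, but event handlers can still be attached to it.
	inf, err := mgr.GetCache().GetInformer(&corev1.Pod{})
	if err != nil {
		return err
	}
	inf.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// react to pod creation events observed by the shared cache
		},
	})
	return nil
}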
+package cache diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index eedfcc6cf0..b906a2e08f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -58,7 +58,7 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runt } // List implements Reader -func (ip *informerCache) List(ctx context.Context, opts *client.ListOptions, out runtime.Object) error { +func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOption) error { gvk, err := apiutil.GVKForObject(out, ip.Scheme) if err != nil { return err @@ -95,11 +95,11 @@ func (ip *informerCache) List(ctx context.Context, opts *client.ListOptions, out return err } - return cache.Reader.List(ctx, opts, out) + return cache.Reader.List(ctx, out, opts...) } // GetInformerForKind returns the informer for the GroupVersionKind -func (ip *informerCache) GetInformerForKind(gvk schema.GroupVersionKind) (cache.SharedIndexInformer, error) { +func (ip *informerCache) GetInformerForKind(gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object obj, err := ip.Scheme.New(gvk) if err != nil { @@ -113,7 +113,7 @@ func (ip *informerCache) GetInformerForKind(gvk schema.GroupVersionKind) (cache. } // GetInformer returns the informer for the obj -func (ip *informerCache) GetInformer(obj runtime.Object) (cache.SharedIndexInformer, error) { +func (ip *informerCache) GetInformer(obj runtime.Object) (Informer, error) { gvk, err := apiutil.GVKForObject(obj, ip.Scheme) if err != nil { return nil, err @@ -135,10 +135,10 @@ func (ip *informerCache) IndexField(obj runtime.Object, field string, extractVal if err != nil { return err } - return indexByField(informer.GetIndexer(), field, extractValue) + return indexByField(informer, field, extractValue) } -func indexByField(indexer cache.Indexer, field string, extractor client.IndexerFunc) error { +func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { indexFunc := func(objRaw interface{}) ([]string, error) { // TODO(directxman12): check if this is the correct type? 
obj, isObj := objRaw.(runtime.Object) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index a5bac80385..a9b7ae347c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -88,23 +88,26 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O } // List lists items out of the indexer and writes them to out -func (c *CacheReader) List(_ context.Context, opts *client.ListOptions, out runtime.Object) error { +func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOption) error { var objs []interface{} var err error - if opts != nil && opts.FieldSelector != nil { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + if listOpts.FieldSelector != nil { // TODO(directxman12): support more complicated field selectors by - // combining multiple indicies, GetIndexers, etc - field, val, requiresExact := requiresExactMatch(opts.FieldSelector) + // combining multiple indices, GetIndexers, etc + field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) if !requiresExact { return fmt.Errorf("non-exact field matches are not supported by the cache") } // list all objects by the field selector. If this is namespaced and we have one, ask for the // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. - objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(opts.Namespace, val)) - } else if opts != nil && opts.Namespace != "" { - objs, err = c.indexer.ByIndex(cache.NamespaceIndex, opts.Namespace) + objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) + } else if listOpts.Namespace != "" { + objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) } else { objs = c.indexer.List() } @@ -112,34 +115,25 @@ func (c *CacheReader) List(_ context.Context, opts *client.ListOptions, out runt return err } var labelSel labels.Selector - if opts != nil && opts.LabelSelector != nil { - labelSel = opts.LabelSelector - } - - filteredItems, err := c.filterListItems(objs, labelSel) - if err != nil { - return err + if listOpts.LabelSelector != nil { + labelSel = listOpts.LabelSelector } - return apimeta.SetList(out, filteredItems) -} - -func (c *CacheReader) filterListItems(objs []interface{}, labelSel labels.Selector) ([]runtime.Object, error) { runtimeObjs := make([]runtime.Object, 0, len(objs)) for _, item := range objs { obj, isObj := item.(runtime.Object) if !isObj { - return nil, fmt.Errorf("cache contained %T, which is not an Object", obj) + return fmt.Errorf("cache contained %T, which is not an Object", obj) } - runtimeObjs = append(runtimeObjs, obj) + outObj := obj.DeepCopyObject() + outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + runtimeObjs = append(runtimeObjs, outObj) } - filteredItems, err := objectutil.FilterWithLabels(runtimeObjs, labelSel) if err != nil { - return nil, err + return err } - - return filteredItems, nil + return apimeta.SetList(out, filteredItems) } // objectKeyToStorageKey converts an object key to store key. 
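Editor's aside: the cache reader above only serves exact-match field selectors, and only for fields that were indexed up front. A sketch of the pairing it expects, register an index with the FieldIndexer, then list through the cached client with MatchingFields on that same field (the "spec.nodeName" field and namespace are just examples):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func listPodsOnNode(ctx context.Context, mgr manager.Manager, node string) (*corev1.PodList, error) {
	// Register the index; the indexer adds namespaced and non-namespaced
	// variants of each key automatically.
	if err := mgr.GetFieldIndexer().IndexField(&corev1.Pod{}, "spec.nodeName",
		func(obj runtime.Object) []string {
			return []string{obj.(*corev1.Pod).Spec.NodeName}
		}); err != nil {
		return nil, err
	}

	// The cached List call resolves the field selector against the index.
	pods := &corev1.PodList{}
	err := mgr.GetClient().List(ctx, pods,
		client.InNamespace("default"),
		client.MatchingFields{"spec.nodeName": node})
	return pods, err
}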
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index 9dea36966b..b2787adfc8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -147,68 +147,63 @@ func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { // the Informer from the map. func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { // Return the informer if it is found - i, ok := func() (*MapEntry, bool) { + i, started, ok := func() (*MapEntry, bool, bool) { ip.mu.RLock() defer ip.mu.RUnlock() i, ok := ip.informersByGVK[gvk] - return i, ok + return i, ip.started, ok }() - if ok { - return i, nil - } - - // Do the mutex part in its own function so we can use defer without blocking pieces that don't - // need to be locked - var sync bool - i, err := func() (*MapEntry, error) { - ip.mu.Lock() - defer ip.mu.Unlock() - // Check the cache to see if we already have an Informer. If we do, return the Informer. - // This is for the case where 2 routines tried to get the informer when it wasn't in the map - // so neither returned early, but the first one created it. - var ok bool - i, ok := ip.informersByGVK[gvk] - if ok { - return i, nil - } - - // Create a NewSharedIndexInformer and add it to the map. - var lw *cache.ListWatch - lw, err := ip.createListWatcher(gvk, ip) - if err != nil { + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { return nil, err } - ni := cache.NewSharedIndexInformer(lw, obj, ip.resync, cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }) - i = &MapEntry{ - Informer: ni, - Reader: CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk}, - } - ip.informersByGVK[gvk] = i - - // Start the Informer if need by - // TODO(seans): write thorough tests and document what happens here - can you add indexers? - // can you add eventhandlers? - if ip.started { - sync = true - go i.Informer.Run(ip.stop) - } - return i, nil - }() - if err != nil { - return nil, err } - if sync { + if started && !i.Informer.HasSynced() { // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. if !cache.WaitForCacheSync(ip.stop, i.Informer.HasSynced) { return nil, fmt.Errorf("failed waiting for %T Informer to sync", obj) } } - return i, err + return i, nil +} + +func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByGVK[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ var lw *cache.ListWatch + lw, err := ip.createListWatcher(gvk, ip) + if err != nil { + return nil, false, err + } + ni := cache.NewSharedIndexInformer(lw, obj, ip.resync, cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + i := &MapEntry{ + Informer: ni, + Reader: CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk}, + } + ip.informersByGVK[gvk] = i + + // Start the Informer if need by + // TODO(seans): write thorough tests and document what happens here - can you add indexers? + // can you add eventhandlers? + if ip.started { + go i.Informer.Run(ip.stop) + } + return i, ip.started, nil } // newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go new file mode 100644 index 0000000000..dc6e159e35 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -0,0 +1,220 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NewCacheFunc - Function for creating a new cache from the options and a rest config +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) + +// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. +// This will scope the cache to a list of namespaces. Listing for all namespaces +// will list for all the namespaces that this knows about. +func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + caches := map[string]Cache{} + for _, ns := range namespaces { + opts.Namespace = ns + c, err := New(config, opts) + if err != nil { + return nil, err + } + caches[ns] = c + } + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme}, nil + } +} + +// multiNamespaceCache knows how to handle multiple namespaced caches +// Use this feature when scoping permissions for your +// operator to a list of namespaces instead of watching every namespace +// in the cluster. 
+type multiNamespaceCache struct { + namespaceToCache map[string]Cache + Scheme *runtime.Scheme +} + +var _ Cache = &multiNamespaceCache{} + +// Methods for multiNamespaceCache to conform to the Informers interface +func (c *multiNamespaceCache) GetInformer(obj runtime.Object) (Informer, error) { + informers := map[string]Informer{} + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformer(obj) + if err != nil { + return nil, err + } + informers[ns] = informer + } + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) GetInformerForKind(gvk schema.GroupVersionKind) (Informer, error) { + informers := map[string]Informer{} + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformerForKind(gvk) + if err != nil { + return nil, err + } + informers[ns] = informer + } + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) Start(stopCh <-chan struct{}) error { + for ns, cache := range c.namespaceToCache { + go func(ns string, cache Cache) { + err := cache.Start(stopCh) + if err != nil { + log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + } + }(ns, cache) + } + <-stopCh + return nil +} + +func (c *multiNamespaceCache) WaitForCacheSync(stop <-chan struct{}) bool { + synced := true + for _, cache := range c.namespaceToCache { + if s := cache.WaitForCacheSync(stop); !s { + synced = s + } + } + return synced +} + +func (c *multiNamespaceCache) IndexField(obj runtime.Object, field string, extractValue client.IndexerFunc) error { + for _, cache := range c.namespaceToCache { + if err := cache.IndexField(obj, field, extractValue); err != nil { + return err + } + } + return nil +} + +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { + cache, ok := c.namespaceToCache[key.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) + } + return cache.Get(ctx, key, obj) +} + +// List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. +func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Namespace != corev1.NamespaceAll { + cache, ok := c.namespaceToCache[listOpts.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + } + return cache.List(ctx, list, opts...) + } + + listAccessor, err := meta.ListAccessor(list) + if err != nil { + return err + } + + allItems, err := apimeta.ExtractList(list) + if err != nil { + return err + } + var resourceVersion string + for _, cache := range c.namespaceToCache { + listObj := list.DeepCopyObject() + err = cache.List(ctx, listObj, opts...) + if err != nil { + return err + } + items, err := apimeta.ExtractList(listObj) + if err != nil { + return err + } + accessor, err := meta.ListAccessor(listObj) + if err != nil { + return fmt.Errorf("object: %T must be a list type", list) + } + allItems = append(allItems, items...) + // The last list call should have the most correct resource version. 
+ resourceVersion = accessor.GetResourceVersion() + } + listAccessor.SetResourceVersion(resourceVersion) + + return apimeta.SetList(list, allItems) +} + +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces +type multiNamespaceInformer struct { + namespaceToInformer map[string]Informer +} + +var _ Informer = &multiNamespaceInformer{} + +// AddEventHandler adds the handler to each namespaced informer +func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandler(handler) + } +} + +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer +func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) + } +} + +// AddIndexers adds the indexer for each namespaced informer +func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { + for _, informer := range i.namespaceToInformer { + err := informer.AddIndexers(indexers) + if err != nil { + return err + } + } + return nil +} + +// HasSynced checks if each namespaced informer has synced +func (i *multiNamespaceInformer) HasSynced() bool { + for _, informer := range i.namespaceToInformer { + if ok := informer.HasSynced(); !ok { + return ok + } + } + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 614d454f1f..cf775c3dc8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -14,6 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. package apiutil import ( @@ -63,10 +66,13 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi } // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated -// with the given GroupVersionKind. +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. 
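Editor's aside: a hedged sketch of how MultiNamespacedCacheBuilder is meant to be consumed, assuming the manager Options in this vendored controller-runtime version exposes a NewCache hook of type cache.NewCacheFunc; the namespace names are placeholders.

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func newScopedManager() (manager.Manager, error) {
	// Restrict all cached reads and watches to two namespaces instead of
	// cluster scope; a List with an empty namespace spans only these two.
	return manager.New(config.GetConfigOrDie(), manager.Options{
		NewCache: cache.MultiNamespacedCacheBuilder([]string{"team-a", "team-b"}),
	})
}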
func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { cfg := createRestConfig(gvk, baseConfig) - cfg.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: codecs} + if cfg.NegotiatedSerializer == nil { + cfg.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: codecs} + } return rest.RESTClientFor(cfg) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 05b9eba2b4..8be5619410 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" @@ -41,6 +42,15 @@ type Options struct { } // New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. func New(config *rest.Config, options Options) (Client, error) { if config == nil { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") @@ -94,26 +104,37 @@ type client struct { unstructuredClient unstructuredClient } +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +// TODO(vincepri): Remove this function and its calls once controller-runtime dependencies are upgraded to 1.15. +func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + // Create implements client.Client -func (c *client) Create(ctx context.Context, obj runtime.Object) error { +func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { _, ok := obj.(*unstructured.Unstructured) if ok { - return c.unstructuredClient.Create(ctx, obj) + return c.unstructuredClient.Create(ctx, obj, opts...) } - return c.typedClient.Create(ctx, obj) + return c.typedClient.Create(ctx, obj, opts...) } // Update implements client.Client -func (c *client) Update(ctx context.Context, obj runtime.Object) error { +func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) _, ok := obj.(*unstructured.Unstructured) if ok { - return c.unstructuredClient.Update(ctx, obj) + return c.unstructuredClient.Update(ctx, obj, opts...) } - return c.typedClient.Update(ctx, obj) + return c.typedClient.Update(ctx, obj, opts...) 
} // Delete implements client.Client -func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { +func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { _, ok := obj.(*unstructured.Unstructured) if ok { return c.unstructuredClient.Delete(ctx, obj, opts...) @@ -121,6 +142,25 @@ func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteO return c.typedClient.Delete(ctx, obj, opts...) } +// DeleteAllOf implements client.Client +func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + } + return c.typedClient.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client +func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + _, ok := obj.(*unstructured.Unstructured) + if ok { + return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + } + return c.typedClient.Patch(ctx, obj, patch, opts...) +} + // Get implements client.Client func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { _, ok := obj.(*unstructured.Unstructured) @@ -131,12 +171,12 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) err } // List implements client.Client -func (c *client) List(ctx context.Context, opts *ListOptions, obj runtime.Object) error { +func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { _, ok := obj.(*unstructured.UnstructuredList) if ok { - return c.unstructuredClient.List(ctx, opts, obj) + return c.unstructuredClient.List(ctx, obj, opts...) } - return c.typedClient.List(ctx, opts, obj) + return c.typedClient.List(ctx, obj, opts...) } // Status implements client.StatusClient @@ -153,10 +193,21 @@ type statusWriter struct { var _ StatusWriter = &statusWriter{} // Update implements client.StatusWriter -func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object) error { +func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + _, ok := obj.(*unstructured.Unstructured) + if ok { + return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + } + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) +} + +// Patch implements client.Client +func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) _, ok := obj.(*unstructured.Unstructured) if ok { - return sw.client.unstructuredClient.UpdateStatus(ctx, obj) + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) } - return sw.client.typedClient.UpdateStatus(ctx, obj) + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go index d6452ab62e..2a1ff05d50 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -22,7 +22,7 @@ import ( "sync" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -141,5 +141,5 @@ type objMeta struct { *resourceMeta // Object contains meta data for the object instance - v1.Object + metav1.Object } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index a57d48767f..25cd32ca18 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -25,12 +25,13 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) var ( - kubeconfig, masterURL string - log = logf.KBLog.WithName("client").WithName("config") + kubeconfig, apiServerURL string + log = logf.RuntimeLog.WithName("client").WithName("config") ) func init() { @@ -38,15 +39,19 @@ func init() { flag.StringVar(&kubeconfig, "kubeconfig", "", "Paths to a kubeconfig. Only required if out-of-cluster.") - flag.StringVar(&masterURL, "master", "", - "The address of the Kubernetes API server. Overrides any value in kubeconfig. "+ + // This flag is deprecated, it'll be removed in a future iteration, please switch to --kubeconfig. + flag.StringVar(&apiServerURL, "master", "", + "(Deprecated: switch to `--kubeconfig`) The address of the Kubernetes API server. Overrides any value in kubeconfig. "+ "Only required if out-of-cluster.") } -// GetConfig creates a *rest.Config for talking to a Kubernetes apiserver. +// GetConfig creates a *rest.Config for talking to a Kubernetes API server. // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. // +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// // Config precedence // // * --kubeconfig flag pointing at a file @@ -57,13 +62,49 @@ func init() { // // * $HOME/.kube/config if exists func GetConfig() (*rest.Config, error) { + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. 
+// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) + if err != nil { + return nil, err + } + + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + cfg.Burst = 30.0 + } + + return cfg, nil +} + +// loadConfig loads a REST Config as per the rules specified in GetConfig +func loadConfig(context string) (*rest.Config, error) { + // If a flag is specified with the config location, use that if len(kubeconfig) > 0 { - return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + return loadConfigWithContext(apiServerURL, kubeconfig, context) } - // If an env variable is specified with the config locaiton, use that + // If an env variable is specified with the config location, use that if len(os.Getenv("KUBECONFIG")) > 0 { - return clientcmd.BuildConfigFromFlags(masterURL, os.Getenv("KUBECONFIG")) + return loadConfigWithContext(apiServerURL, os.Getenv("KUBECONFIG"), context) } // If no explicit location, try the in-cluster config if c, err := rest.InClusterConfig(); err == nil { @@ -71,8 +112,8 @@ func GetConfig() (*rest.Config, error) { } // If no in-cluster config, try the default location in the user's home directory if usr, err := user.Current(); err == nil { - if c, err := clientcmd.BuildConfigFromFlags( - "", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { + if c, err := loadConfigWithContext(apiServerURL, filepath.Join(usr.HomeDir, ".kube", "config"), + context); err == nil { return c, nil } } @@ -80,6 +121,17 @@ func GetConfig() (*rest.Config, error) { return nil, fmt.Errorf("could not locate a kubeconfig") } +func loadConfigWithContext(apiServerURL, kubeconfig, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go index 3918958d27..796c9cf590 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package config contains libraries for initializing rest configs for talking to the Kubernetes API +// Package config contains libraries for initializing REST configs for talking to the Kubernetes API package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go new file mode 100644 index 0000000000..2965e5fa94 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. 
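Editor's aside: a small usage sketch of the context-aware loader added above; the kubeconfig context name is a placeholder. The QPS/burst defaulting only applies when the loaded config leaves QPS unset.

package example

import (
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func loadConfig() (*rest.Config, error) {
	// Resolves --kubeconfig, $KUBECONFIG, the in-cluster config, or
	// ~/.kube/config, then selects the named kubeconfig context.
	cfg, err := config.GetConfigWithContext("capo-admin")
	if err != nil {
		return nil, err
	}
	// cfg.QPS is 20 and cfg.Burst is 30 here unless the kubeconfig set them.
	return cfg, nil
}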
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. +// +// Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// A common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. +package client diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 36d0fce620..00b43caee7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -19,10 +19,9 @@ package client import ( "context" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ) @@ -39,6 +38,14 @@ func ObjectKeyFromObject(obj runtime.Object) (ObjectKey, error) { return ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}, nil } +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj runtime.Object) ([]byte, error) +} + // TODO(directxman12): is there a sane way to deal with get/delete options? // Reader knows how to read and list Kubernetes objects. @@ -51,20 +58,27 @@ type Reader interface { // List retrieves list of objects for a given namespace and list options. On a // successful call, Items field in the list will be populated with the // result returned from the server. 
- List(ctx context.Context, opts *ListOptions, list runtime.Object) error + List(ctx context.Context, list runtime.Object, opts ...ListOption) error } // Writer knows how to create, delete, and update Kubernetes objects. type Writer interface { // Create saves the object obj in the Kubernetes cluster. - Create(ctx context.Context, obj runtime.Object) error + Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error // Delete deletes the given obj from Kubernetes cluster. - Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error + Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error // Update updates the given obj in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object) error + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error } // StatusClient knows how to create a client which can update status subresource @@ -78,7 +92,12 @@ type StatusWriter interface { // Update updates the fields corresponding to the status subresource for the // given obj. obj must be a struct pointer so that obj can be updated // with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object) error + Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error } // Client knows how to perform CRUD operations on Kubernetes objects. @@ -89,7 +108,8 @@ type Client interface { } // IndexerFunc knows how to take an object and turn it into a series -// of (non-namespaced) keys for that object. +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. type IndexerFunc func(runtime.Object) []string // FieldIndexer knows how to index over a particular "field" such that it @@ -100,193 +120,16 @@ type FieldIndexer interface { // compatibility with the Kubernetes API server, only return one key, and only use // fields that the API server supports. Otherwise, you can return multiple keys, // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. IndexField(obj runtime.Object, field string, extractValue IndexerFunc) error } -// DeleteOptions contains options for delete requests. It's generally a subset -// of metav1.DeleteOptions. -type DeleteOptions struct { - // GracePeriodSeconds is the duration in seconds before the object should be - // deleted. Value must be non-negative integer. The value zero indicates - // delete immediately. If this value is nil, the default grace period for the - // specified type will be used. 
- GracePeriodSeconds *int64 - - // Preconditions must be fulfilled before a deletion is carried out. If not - // possible, a 409 Conflict status will be returned. - Preconditions *metav1.Preconditions - - // PropagationPolicy determined whether and how garbage collection will be - // performed. Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - - // allow the garbage collector to delete the dependents in the background; - // 'Foreground' - a cascading policy that deletes all dependents in the - // foreground. - PropagationPolicy *metav1.DeletionPropagation - - // Raw represents raw DeleteOptions, as passed to the API server. - Raw *metav1.DeleteOptions -} - -// AsDeleteOptions returns these options as a metav1.DeleteOptions. -// This may mutate the Raw field. -func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { - - if o == nil { - return &metav1.DeleteOptions{} - } - if o.Raw == nil { - o.Raw = &metav1.DeleteOptions{} - } - - o.Raw.GracePeriodSeconds = o.GracePeriodSeconds - o.Raw.Preconditions = o.Preconditions - o.Raw.PropagationPolicy = o.PropagationPolicy - return o.Raw -} - -// ApplyOptions executes the given DeleteOptionFuncs and returns the mutated -// DeleteOptions. -func (o *DeleteOptions) ApplyOptions(optFuncs []DeleteOptionFunc) *DeleteOptions { - for _, optFunc := range optFuncs { - optFunc(o) - } - return o -} - -// DeleteOptionFunc is a function that mutates a DeleteOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type DeleteOptionFunc func(*DeleteOptions) - -// GracePeriodSeconds is a functional option that sets the GracePeriodSeconds -// field of a DeleteOptions struct. -func GracePeriodSeconds(gp int64) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.GracePeriodSeconds = &gp - } -} - -// Preconditions is a functional option that sets the Preconditions field of a -// DeleteOptions struct. -func Preconditions(p *metav1.Preconditions) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.Preconditions = p - } -} - -// PropagationPolicy is a functional option that sets the PropagationPolicy -// field of a DeleteOptions struct. -func PropagationPolicy(p metav1.DeletionPropagation) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.PropagationPolicy = &p - } -} - -// ListOptions contains options for limitting or filtering results. -// It's generally a subset of metav1.ListOptions, with support for -// pre-parsed selectors (since generally, selectors will be executed -// against the cache). -type ListOptions struct { - // LabelSelector filters results by label. Use SetLabelSelector to - // set from raw string form. - LabelSelector labels.Selector - // FieldSelector filters results by a particular field. In order - // to use this with cache-based implementations, restrict usage to - // a single field-value pair that's been added to the indexers. - FieldSelector fields.Selector - - // Namespace represents the namespace to list for, or empty for - // non-namespaced objects, or to list across all namespaces. - Namespace string - - // Raw represents raw ListOptions, as passed to the API server. 
Note - // that these may not be respected by all implementations of interface, - // and the LabelSelector and FieldSelector fields are ignored. - Raw *metav1.ListOptions -} - -// SetLabelSelector sets this the label selector of these options -// from a string form of the selector. -func (o *ListOptions) SetLabelSelector(selRaw string) error { - sel, err := labels.Parse(selRaw) - if err != nil { - return err - } - o.LabelSelector = sel - return nil -} - -// SetFieldSelector sets this the label selector of these options -// from a string form of the selector. -func (o *ListOptions) SetFieldSelector(selRaw string) error { - sel, err := fields.ParseSelector(selRaw) - if err != nil { - return err - } - o.FieldSelector = sel - return nil -} - -// AsListOptions returns these options as a flattened metav1.ListOptions. -// This may mutate the Raw field. -func (o *ListOptions) AsListOptions() *metav1.ListOptions { - if o == nil { - return &metav1.ListOptions{} - } - if o.Raw == nil { - o.Raw = &metav1.ListOptions{} +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. +func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil } - if o.LabelSelector != nil { - o.Raw.LabelSelector = o.LabelSelector.String() - } - if o.FieldSelector != nil { - o.Raw.FieldSelector = o.FieldSelector.String() - } - return o.Raw -} - -// MatchingLabels is a convenience function that sets the label selector -// to match the given labels, and then returns the options. -// It mutates the list options. -func (o *ListOptions) MatchingLabels(lbls map[string]string) *ListOptions { - sel := labels.SelectorFromSet(lbls) - o.LabelSelector = sel - return o -} - -// MatchingField is a convenience function that sets the field selector -// to match the given field, and then returns the options. -// It mutates the list options. -func (o *ListOptions) MatchingField(name, val string) *ListOptions { - sel := fields.SelectorFromSet(fields.Set{name: val}) - o.FieldSelector = sel - return o -} - -// InNamespace is a convenience function that sets the namespace, -// and then returns the options. It mutates the list options. -func (o *ListOptions) InNamespace(ns string) *ListOptions { - o.Namespace = ns - return o -} - -// MatchingLabels is a convenience function that constructs list options -// to match the given labels. -func MatchingLabels(lbls map[string]string) *ListOptions { - return (&ListOptions{}).MatchingLabels(lbls) -} - -// MatchingField is a convenience function that constructs list options -// to match the given field. -func MatchingField(name, val string) *ListOptions { - return (&ListOptions{}).MatchingField(name, val) -} - -// InNamespace is a convenience function that constructs list -// options to list in the given namespace. -func InNamespace(ns string) *ListOptions { - return (&ListOptions{}).InNamespace(ns) + return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 0000000000..4007a67657 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,491 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. + ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. 
It must be set with server-side apply. + FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// CreateDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var CreateDryRunAll = DryRunAll + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. +type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. 
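Editor's aside: a hedged sketch of how these typed options compose on a call site, replacing the removed *Options structs and option funcs; the ConfigMap value and the field-owner name are arbitrary.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createThenDelete(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
	// Options are plain values implementing the per-verb interfaces, so the
	// same DryRunAll value works for create, update, patch, and delete.
	if err := c.Create(ctx, cm, client.DryRunAll, client.FieldOwner("capo-controller")); err != nil {
		return err
	}
	return c.Delete(ctx, cm,
		client.GracePeriodSeconds(0),
		client.PropagationPolicy(metav1.DeletePropagationForeground))
}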
+type GracePeriodSeconds int64 + +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +type Preconditions metav1.Preconditions + +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +type PropagationPolicy metav1.DeletionPropagation + +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use SetLabelSelector to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector and FieldSelector fields are ignored. + Raw *metav1.ListOptions +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. +func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromSet(map[string]string(m)) + opts.LabelSelector = sel +} + +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingField filters the list operation on the given field selector +// (or index in the case of cached lists). +// +// Deprecated: Use MatchingFields +func MatchingField(name, val string) MatchingFields { + return MatchingFields{name: val} +} + +// MatchingField filters the list/delete operation on the given field selector +// (or index in the case of cached lists). 
+type MatchingFields fields.Set + +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? + sel := fields.SelectorFromSet(fields.Set(m)) + opts.FieldSelector = sel +} + +func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// }}} + +// {{{ Update Options + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). +func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) + } + return o +} + +// UpdateDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var UpdateDryRunAll = DryRunAll + +// }}} + +// {{{ Patch Options + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. 
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// PatchDryRunAll sets the "dry run" option to "all". +// +// Deprecated: Use DryRunAll +var PatchDryRunAll = DryRunAll + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 0000000000..164de17f32 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply = applyPatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj runtime.Object) ([]byte, error) { + return s.data, nil +} + +// ConstantPatch constructs a new Patch with the given PatchType and data. +func ConstantPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +type mergeFromPatch struct { + from runtime.Object +} + +// Type implements patch. +func (s *mergeFromPatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. 
+func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) { + originalJSON, err := json.Marshal(s.from) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +func MergeFrom(obj runtime.Object) Patch { + return &mergeFromPatch{obj} +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj runtime.Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go index efcf6d4c31..47cba9576d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -23,18 +23,20 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// DelegatingClient forms an interface Client by composing separate -// reader, writer and statusclient interfaces. This way, you can have an Client that -// reads from a cache and writes to the API server. +// DelegatingClient forms a Client by composing separate reader, writer and +// statusclient interfaces. This way, you can have an Client that reads from a +// cache and writes to the API server. type DelegatingClient struct { Reader Writer StatusClient } -// DelegatingReader forms a interface Reader that will cause Get and List -// requests for unstructured types to use the ClientReader while -// requests for any other type of object with use the CacheReader. +// DelegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). type DelegatingReader struct { CacheReader Reader ClientReader Reader @@ -50,10 +52,10 @@ func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.O } // List retrieves list of objects for a given namespace and list options. -func (d *DelegatingReader) List(ctx context.Context, opts *ListOptions, list runtime.Object) error { +func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOption) error { _, isUnstructured := list.(*unstructured.UnstructuredList) if isUnstructured { - return d.ClientReader.List(ctx, opts, list) + return d.ClientReader.List(ctx, list, opts...) } - return d.CacheReader.List(ctx, opts, list) + return d.CacheReader.List(ctx, list, opts...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index 51cef3006f..6ddc1171d3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -30,54 +30,108 @@ type typedClient struct { } // Create implements client.Client -func (c *typedClient) Create(ctx context.Context, obj runtime.Object) error { +func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) return o.Post(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). Context(ctx). Do(). Into(obj) } // Update implements client.Client -func (c *typedClient) Update(ctx context.Context, obj runtime.Object) error { +func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) return o.Put(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). Context(ctx). Do(). Into(obj) } // Delete implements client.Client -func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { +func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err } deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). Resource(o.resource()). Name(o.GetName()). - Body(deleteOpts.ApplyOptions(opts).AsDeleteOptions()). + Body(deleteOpts.AsDeleteOptions()). + Context(ctx). + Do(). + Error() +} + +// DeleteAllOf implements client.Client +func (c *typedClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). Context(ctx). Do(). Error() } +// Patch implements client.Client +func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Context(ctx). + Do(). 
+ Into(obj) +} + // Get implements client.Client func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { r, err := c.cache.getResource(obj) @@ -92,26 +146,24 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object } // List implements client.Client -func (c *typedClient) List(ctx context.Context, opts *ListOptions, obj runtime.Object) error { +func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error { r, err := c.cache.getResource(obj) if err != nil { return err } - namespace := "" - if opts != nil { - namespace = opts.Namespace - } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) return r.Get(). - NamespaceIfScoped(namespace, r.isNamespaced()). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). Resource(r.resource()). - VersionedParams(opts.AsListOptions(), c.paramCodec). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). Context(ctx). Do(). Into(obj) } // UpdateStatus used by StatusWriter to write status. -func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object) error { +func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { return err @@ -126,6 +178,32 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object) erro Name(o.GetName()). SubResource("status"). Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Context(ctx). + Do(). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. +func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). Context(ctx). Do(). 
Into(obj) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index 2ce0b19eb8..440ad2f97c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -37,16 +37,18 @@ type unstructuredClient struct { } // Create implements client.Client -func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object) error { +func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object, opts ...CreateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } + createOpts := CreateOptions{} + createOpts.ApplyOptions(opts) r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) if err != nil { return err } - i, err := r.Create(u, metav1.CreateOptions{}) + i, err := r.Create(u, *createOpts.AsCreateOptions()) if err != nil { return err } @@ -55,16 +57,18 @@ func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object) erro } // Update implements client.Client -func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object) error { +func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) if err != nil { return err } - i, err := r.Update(u, metav1.UpdateOptions{}) + i, err := r.Update(u, *updateOpts.AsUpdateOptions()) if err != nil { return err } @@ -73,7 +77,7 @@ func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object) erro } // Delete implements client.Client -func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { +func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts ...DeleteOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -83,10 +87,53 @@ func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts return err } deleteOpts := DeleteOptions{} - err = r.Delete(u.GetName(), deleteOpts.ApplyOptions(opts).AsDeleteOptions()) + deleteOpts.ApplyOptions(opts) + err = r.Delete(u.GetName(), deleteOpts.AsDeleteOptions()) return err } +// DeleteAllOf implements client.Client +func (uc *unstructuredClient) DeleteAllOf(_ context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + err = r.DeleteCollection(deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) + return err +} + +// Patch implements client.Client +func (uc *unstructuredClient) Patch(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := 
uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + i, err := r.Patch(u.GetName(), patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions()) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + // Get implements client.Client func (uc *unstructuredClient) Get(_ context.Context, key ObjectKey, obj runtime.Object) error { u, ok := obj.(*unstructured.Unstructured) @@ -106,7 +153,7 @@ func (uc *unstructuredClient) Get(_ context.Context, key ObjectKey, obj runtime. } // List implements client.Client -func (uc *unstructuredClient) List(_ context.Context, opts *ListOptions, obj runtime.Object) error { +func (uc *unstructuredClient) List(_ context.Context, obj runtime.Object, opts ...ListOption) error { u, ok := obj.(*unstructured.UnstructuredList) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -115,16 +162,14 @@ func (uc *unstructuredClient) List(_ context.Context, opts *ListOptions, obj run if strings.HasSuffix(gvk.Kind, "List") { gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - namespace := "" - if opts != nil { - namespace = opts.Namespace - } - r, err := uc.getResourceInterface(gvk, namespace) + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + r, err := uc.getResourceInterface(gvk, listOpts.Namespace) if err != nil { return err } - i, err := r.List(*opts.AsListOptions()) + i, err := r.List(*listOpts.AsListOptions()) if err != nil { return err } @@ -133,7 +178,24 @@ func (uc *unstructuredClient) List(_ context.Context, opts *ListOptions, obj run return nil } -func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object) error { +func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + if err != nil { + return err + } + i, err := r.UpdateStatus(u, *(&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions()) + if err != nil { + return err + } + u.Object = i.Object + return nil +} + +func (uc *unstructuredClient) PatchStatus(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) @@ -142,7 +204,13 @@ func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object if err != nil { return err } - i, err := r.UpdateStatus(u, metav1.UpdateOptions{}) + + data, err := patch.Data(obj) + if err != nil { + return err + } + + i, err := r.Patch(u.GetName(), patch.Type(), data, *(&PatchOptions{}).ApplyOptions(opts).AsPatchOptions(), "status") if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 64a27a7a22..3411e95f75 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -42,17 +42,19 @@ type Options struct { // Work typically is reads and writes Kubernetes objects to make the system state match the state specified // in the object Spec. 
type Controller interface { - // Reconciler is called to Reconciler an object by Namespace/Name + // Reconciler is called to reconcile an object by Namespace/Name reconcile.Reconciler - // Watch takes events provided by a Source and uses the EventHandler to enqueue reconcile.Requests in - // response to the events. + // Watch takes events provided by a Source and uses the EventHandler to + // enqueue reconcile.Requests in response to the events. // - // Watch may be provided one or more Predicates to filter events before they are given to the EventHandler. - // Events will be passed to the EventHandler iff all provided Predicates evaluate to true. + // Watch may be provided one or more Predicates to filter events before + // they are given to the EventHandler. Events will be passed to the + // EventHandler if all provided Predicates evaluate to true. Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error - // Start starts the controller. Start blocks until stop is closed or a controller has an error starting. + // Start starts the controller. Start blocks until stop is closed or a + // controller has an error starting. Start(stop <-chan struct{}) error } @@ -78,15 +80,15 @@ func New(name string, mgr manager.Manager, options Options) (Controller, error) // Create controller with dependencies set c := &controller.Controller{ - Do: options.Reconciler, - Cache: mgr.GetCache(), - Config: mgr.GetConfig(), - Scheme: mgr.GetScheme(), - Client: mgr.GetClient(), - Recorder: mgr.GetRecorder(name), - Queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + Do: options.Reconciler, + Cache: mgr.GetCache(), + Config: mgr.GetConfig(), + Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(name), + Queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), MaxConcurrentReconciles: options.MaxConcurrentReconciles, - Name: name, + Name: name, } // Add the controller as a Manager components diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go new file mode 100644 index 0000000000..bb67c7037f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -0,0 +1,170 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllerutil + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// AlreadyOwnedError is an error returned if the object you are trying to assign +// a controller reference is already owned by another controller Object is the +// subject and Owner is the reference for the current owner +type AlreadyOwnedError struct { + Object metav1.Object + Owner metav1.OwnerReference +} + +func (e *AlreadyOwnedError) Error() string { + return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) +} + +func newAlreadyOwnedError(Object metav1.Object, Owner metav1.OwnerReference) *AlreadyOwnedError { + return &AlreadyOwnedError{ + Object: Object, + Owner: Owner, + } +} + +// SetControllerReference sets owner as a Controller OwnerReference on owned. +// This is used for garbage collection of the owned object and for +// reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). +// Since only one OwnerReference can be a controller, it returns an error if +// there is another OwnerReference with Controller flag set. +func SetControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + + // Create a new ref + ref := *metav1.NewControllerRef(owner, schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind}) + + existingRefs := object.GetOwnerReferences() + fi := -1 + for i, r := range existingRefs { + if referSameObject(ref, r) { + fi = i + } else if r.Controller != nil && *r.Controller { + return newAlreadyOwnedError(object, r) + } + } + if fi == -1 { + existingRefs = append(existingRefs, ref) + } else { + existingRefs[fi] = ref + } + + // Update owner references + object.SetOwnerReferences(existingRefs) + return nil +} + +// Returns true if a and b point to the same object +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name +} + +// OperationResult is the action result of a CreateOrUpdate call +type OperationResult string + +const ( // They should complete the sentence "Deployment default/foo has been ..." + // OperationResultNone means that the resource has not been changed + OperationResultNone OperationResult = "unchanged" + // OperationResultCreated means that a new resource is created + OperationResultCreated OperationResult = "created" + // OperationResultUpdated means that an existing resource is updated + OperationResultUpdated OperationResult = "updated" +) + +// CreateOrUpdate creates or updates the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the existing +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. 
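+//
+// Illustrative usage sketch (not from upstream; "r.Client", "deploy",
+// "owner", "replicas" and "r.Scheme" are hypothetical):
+//
+//   op, err := controllerutil.CreateOrUpdate(ctx, r.Client, deploy, func() error {
+//       deploy.Spec.Replicas = &replicas
+//       return controllerutil.SetControllerReference(owner, deploy, r.Scheme)
+//   })
+//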
+func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f MutateFn) (OperationResult, error) { + key, err := client.ObjectKeyFromObject(obj) + if err != nil { + return OperationResultNone, err + } + + if err := c.Get(ctx, key, obj); err != nil { + if !errors.IsNotFound(err) { + return OperationResultNone, err + } + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + existing := obj.DeepCopyObject() + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + + if reflect.DeepEqual(existing, obj) { + return OperationResultNone, nil + } + + if err := c.Update(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultUpdated, nil +} + +// mutate wraps a MutateFn and applies validation to its result +func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { + if err := f(); err != nil { + return err + } + if newKey, err := client.ObjectKeyFromObject(obj); err != nil || key != newKey { + return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") + } + return nil +} + +// MutateFn is a function which mutates the existing object into it's desired state. +type MutateFn func() error diff --git a/pkg/apis/openstackproviderconfig/group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go similarity index 81% rename from pkg/apis/openstackproviderconfig/group.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go index 740e7963ce..ab386b29cd 100644 --- a/pkg/apis/openstackproviderconfig/group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package openstackproviderconfig contains openstackproviderconfig API versions -package openstackproviderconfig +/* +Package controllerutil contains utility functions for working with and implementing Controllers. +*/ +package controllerutil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go new file mode 100644 index 0000000000..da32ab48e4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides interface definitions that an API Type needs to +implement for it to be supported by the generic conversion webhook handler +defined under pkg/webhook/conversion. +*/ +package conversion + +import "k8s.io/apimachinery/pkg/runtime" + +// Convertible defines capability of a type to convertible i.e. it can be converted to/from a hub type. 
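+//
+// An illustrative (non-vendored) sketch of the expected shape, using a
+// hypothetical spoke type WidgetV1 and hub type WidgetV2:
+//
+//   func (src *WidgetV1) ConvertTo(dst conversion.Hub) error   { /* copy fields into dst.(*WidgetV2) */ return nil }
+//   func (dst *WidgetV1) ConvertFrom(src conversion.Hub) error { /* copy fields from src.(*WidgetV2) */ return nil }
+//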
+type Convertible interface { + runtime.Object + ConvertTo(dst Hub) error + ConvertFrom(src Hub) error +} + +// Hub marks that a given type is the hub type for conversion. This means that +// all conversions will first convert to the hub type, then convert from the hub +// type to the destination type. All types besides the hub type should implement +// Convertible. +type Hub interface { + runtime.Object + Hub() +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go new file mode 100644 index 0000000000..88ef3a100e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/crd.go @@ -0,0 +1,268 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" + "sigs.k8s.io/yaml" +) + +// CRDInstallOptions are the options for installing CRDs +type CRDInstallOptions struct { + // Paths is the path to the directory containing CRDs + Paths []string + + // CRDs is a list of CRDs to install + CRDs []*apiextensionsv1beta1.CustomResourceDefinition + + // ErrorIfPathMissing will cause an error if a Path does not exist + ErrorIfPathMissing bool + + // maxTime is the max time to wait + maxTime time.Duration + + // pollInterval is the interval to check + pollInterval time.Duration +} + +const defaultPollInterval = 100 * time.Millisecond +const defaultMaxWait = 10 * time.Second + +// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory +func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1beta1.CustomResourceDefinition, error) { + defaultCRDOptions(&options) + + // Read the CRD yamls into options.CRDs + if err := readCRDFiles(&options); err != nil { + return nil, err + } + + // Create the CRDs in the apiserver + if err := CreateCRDs(config, options.CRDs); err != nil { + return options.CRDs, err + } + + // Wait for the CRDs to appear as Resources in the apiserver + if err := WaitForCRDs(config, options.CRDs, options); err != nil { + return options.CRDs, err + } + + return options.CRDs, nil +} + +// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs +func readCRDFiles(options *CRDInstallOptions) error { + if len(options.Paths) > 0 { + for _, path := range options.Paths { + if _, err := os.Stat(path); !options.ErrorIfPathMissing && os.IsNotExist(err) { + continue + } + new, err := readCRDs(path) + if err != nil { + return err + } + options.CRDs = append(options.CRDs, new...) 
+ } + } + return nil +} + +// defaultCRDOptions sets the default values for CRDs +func defaultCRDOptions(o *CRDInstallOptions) { + if o.maxTime == 0 { + o.maxTime = defaultMaxWait + } + if o.pollInterval == 0 { + o.pollInterval = defaultPollInterval + } +} + +// WaitForCRDs waits for the CRDs to appear in discovery +func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResourceDefinition, options CRDInstallOptions) error { + // Add each CRD to a map of GroupVersion to Resource + waitingFor := map[schema.GroupVersion]*sets.String{} + for _, crd := range crds { + gvs := []schema.GroupVersion{} + if crd.Spec.Version != "" { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}) + } + for _, ver := range crd.Spec.Versions { + if ver.Served { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: ver.Name}) + } + } + for _, gv := range gvs { + log.V(1).Info("adding API in waitlist", "GV", gv) + if _, found := waitingFor[gv]; !found { + // Initialize the set + waitingFor[gv] = &sets.String{} + } + // Add the Resource + waitingFor[gv].Insert(crd.Spec.Names.Plural) + } + } + + // Poll until all resources are found in discovery + p := &poller{config: config, waitingFor: waitingFor} + return wait.PollImmediate(options.pollInterval, options.maxTime, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not +type poller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersion]*sets.String +} + +// poll checks if all the resources have been found in discovery, and returns false if not +func (p *poller) poll() (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + cs, err := clientset.NewForConfig(p.config) + if err != nil { + return false, err + } + + allFound := true + for gv, resources := range p.waitingFor { + // All resources found, do nothing + if resources.Len() == 0 { + delete(p.waitingFor, gv) + continue + } + + // Get the Resources for this GroupVersion + // TODO: Maybe the controller-runtime client should be able to do this... 
+ resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version) + if err != nil { + return false, nil + } + + // Remove each found resource from the resources set that we are waiting for + for _, resource := range resourceList.APIResources { + resources.Delete(resource.Name) + } + + // Still waiting on some resources in this group version + if resources.Len() != 0 { + allFound = false + } + } + return allFound, nil +} + +// CreateCRDs creates the CRDs +func CreateCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResourceDefinition) error { + cs, err := clientset.NewForConfig(config) + if err != nil { + return err + } + + // Create each CRD + for _, crd := range crds { + log.V(1).Info("installing CRD", "crd", crd.Name) + if _, err := cs.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil { + return err + } + } + return nil +} + +// readCRDs reads the CRDs from files and Unmarshals them into structs +func readCRDs(path string) ([]*apiextensionsv1beta1.CustomResourceDefinition, error) { + // Get the CRD files + var files []os.FileInfo + var err error + log.V(1).Info("reading CRDs from path", "path", path) + if files, err = ioutil.ReadDir(path); err != nil { + return nil, err + } + + // White list the file extensions that may contain CRDs + crdExts := sets.NewString(".json", ".yaml", ".yml") + + var crds []*apiextensionsv1beta1.CustomResourceDefinition + for _, file := range files { + // Only parse whitelisted file types + if !crdExts.Has(filepath.Ext(file.Name())) { + continue + } + + // Unmarshal CRDs from file into structs + docs, err := readDocuments(filepath.Join(path, file.Name())) + if err != nil { + return nil, err + } + + for _, doc := range docs { + crd := &apiextensionsv1beta1.CustomResourceDefinition{} + if err = yaml.Unmarshal(doc, crd); err != nil { + return nil, err + } + + // Check that it is actually a CRD + if crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { + continue + } + crds = append(crds, crd) + } + + log.V(1).Info("read CRDs from file", "file", file.Name()) + } + return crds, nil +} + +// readDocuments reads documents from file +func readDocuments(fp string) ([][]byte, error) { + b, err := ioutil.ReadFile(fp) + if err != nil { + return nil, err + } + + docs := [][]byte{} + reader := k8syaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(b))) + for { + // Read document + doc, err := reader.Read() + if err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + docs = append(docs, doc) + } + + return docs, nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go similarity index 52% rename from vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go index d29913cf52..412e794cc8 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/doc.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=admissionregistration.k8s.io - -// Package v1alpha1 is the v1alpha1 version of the API. -// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and validatingWebhookConfiguration is for the -// new dynamic admission controller configuration. 
-package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" +// Package envtest provides libraries for integration testing by starting a local control plane +// +// Control plane binaries (etcd and kube-apiserver) are loaded by default from +// /usr/local/kubebuilder/bin. This can be overridden by setting the +// KUBEBUILDER_ASSETS environment variable, or by directly creating a +// ControlPlane for the Environment to use. +// +// Environment can also be configured to work with an existing cluster, and +// simply load CRDs and provide client configuration. +package envtest diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/ginkgo.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/ginkgo.go new file mode 100644 index 0000000000..8995687f98 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/ginkgo.go @@ -0,0 +1,11 @@ +package envtest + +import ( + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" +) + +// NewlineReporter is Reporter that Prints a newline after the default Reporter output so that the results +// are correctly parsed by test automation. +// See issue https://github.com/jstemmer/go-junit-report/issues/31 +// It's re-exported here to avoid compatibility breakage/mass rewrites. +type NewlineReporter = printer.NewlineReporter diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/printer/ginkgo.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/printer/ginkgo.go new file mode 100644 index 0000000000..1435a1a435 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/printer/ginkgo.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package printer contains setup for a friendlier Ginkgo printer that's easier +// to parse by test automation. +package printer + +import ( + "fmt" + + "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" +) + +var _ ginkgo.Reporter = NewlineReporter{} + +// NewlineReporter is Reporter that Prints a newline after the default Reporter output so that the results +// are correctly parsed by test automation. 
+// See issue https://github.com/jstemmer/go-junit-report/issues/31 +type NewlineReporter struct{} + +// SpecSuiteWillBegin implements ginkgo.Reporter +func (NewlineReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { +} + +// BeforeSuiteDidRun implements ginkgo.Reporter +func (NewlineReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {} + +// AfterSuiteDidRun implements ginkgo.Reporter +func (NewlineReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {} + +// SpecWillRun implements ginkgo.Reporter +func (NewlineReporter) SpecWillRun(specSummary *types.SpecSummary) {} + +// SpecDidComplete implements ginkgo.Reporter +func (NewlineReporter) SpecDidComplete(specSummary *types.SpecSummary) {} + +// SpecSuiteDidEnd Prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:" +func (NewlineReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { fmt.Printf("\n") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go new file mode 100644 index 0000000000..7c4d8cfff7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -0,0 +1,261 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/testing_frameworks/integration" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("test-env") + +// Default binary path for test framework +const ( + envUseExistingCluster = "USE_EXISTING_CLUSTER" + envKubeAPIServerBin = "TEST_ASSET_KUBE_APISERVER" + envEtcdBin = "TEST_ASSET_ETCD" + envKubectlBin = "TEST_ASSET_KUBECTL" + envKubebuilderPath = "KUBEBUILDER_ASSETS" + envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT" + envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT" + envAttachOutput = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT" + defaultKubebuilderPath = "/usr/local/kubebuilder/bin" + StartTimeout = 60 + StopTimeout = 60 + + defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second + defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second +) + +func defaultAssetPath(binary string) string { + assetPath := os.Getenv(envKubebuilderPath) + if assetPath == "" { + assetPath = defaultKubebuilderPath + } + return filepath.Join(assetPath, binary) + +} + +// DefaultKubeAPIServerFlags are default flags necessary to bring up apiserver. 
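+// When Environment.KubeAPIServerFlags is left empty, these defaults are used.
+// Illustrative (non-vendored) usage sketch with hypothetical CRD paths:
+//
+//   testEnv := &envtest.Environment{
+//       CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
+//   }
+//   cfg, err := testEnv.Start()
+//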
+var DefaultKubeAPIServerFlags = []string{ + "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", + "--cert-dir={{ .CertDir }}", + "--insecure-port={{ if .URL }}{{ .URL.Port }}{{ end }}", + "--insecure-bind-address={{ if .URL }}{{ .URL.Hostname }}{{ end }}", + "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", + "--admission-control=AlwaysAdmit", +} + +// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and +// install extension APIs +type Environment struct { + // ControlPlane is the ControlPlane including the apiserver and etcd + ControlPlane integration.ControlPlane + + // Config can be used to talk to the apiserver. It's automatically + // populated if not set using the standard controller-runtime config + // loading. + Config *rest.Config + + // CRDs is a list of CRDs to install + CRDs []*apiextensionsv1beta1.CustomResourceDefinition + + // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. + CRDDirectoryPaths []string + + // UseExisting indicates that this environments should use an + // existing kubeconfig, instead of trying to stand up a new control plane. + // This is useful in cases that need aggregated API servers and the like. + UseExistingCluster *bool + + // ControlPlaneStartTimeout is the maximum duration each controlplane component + // may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStartTimeout time.Duration + + // ControlPlaneStopTimeout is the maximum duration each controlplane component + // may take to stop. It defaults to the KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT + // environment variable or 20 seconds if unspecified + ControlPlaneStopTimeout time.Duration + + // KubeAPIServerFlags is the set of flags passed while starting the api server. + KubeAPIServerFlags []string + + // AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr. + // Enable this to get more visibility of the testing control plane. + // It respect KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable. + AttachControlPlaneOutput bool +} + +// Stop stops a running server +func (te *Environment) Stop() error { + if te.useExistingCluster() { + return nil + } + return te.ControlPlane.Stop() +} + +// getAPIServerFlags returns flags to be used with the Kubernetes API server. +func (te Environment) getAPIServerFlags() []string { + // Set default API server flags if not set. + if len(te.KubeAPIServerFlags) == 0 { + return DefaultKubeAPIServerFlags + } + return te.KubeAPIServerFlags +} + +// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on +func (te *Environment) Start() (*rest.Config, error) { + if te.useExistingCluster() { + log.V(1).Info("using existing cluster") + if te.Config == nil { + // we want to allow people to pass in their own config, so + // only load a config if it hasn't already been set. 
+ log.V(1).Info("automatically acquiring client configuration") + + var err error + te.Config, err = config.GetConfig() + if err != nil { + return nil, err + } + } + } else { + if te.ControlPlane.APIServer == nil { + te.ControlPlane.APIServer = &integration.APIServer{Args: te.getAPIServerFlags()} + } + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &integration.Etcd{} + } + + if os.Getenv(envAttachOutput) == "true" { + te.AttachControlPlaneOutput = true + } + if te.ControlPlane.APIServer.Out == nil && te.AttachControlPlaneOutput { + te.ControlPlane.APIServer.Out = os.Stdout + } + if te.ControlPlane.APIServer.Err == nil && te.AttachControlPlaneOutput { + te.ControlPlane.APIServer.Err = os.Stderr + } + if te.ControlPlane.Etcd.Out == nil && te.AttachControlPlaneOutput { + te.ControlPlane.Etcd.Out = os.Stdout + } + if te.ControlPlane.Etcd.Err == nil && te.AttachControlPlaneOutput { + te.ControlPlane.Etcd.Err = os.Stderr + } + + if os.Getenv(envKubeAPIServerBin) == "" { + te.ControlPlane.APIServer.Path = defaultAssetPath("kube-apiserver") + } + if os.Getenv(envEtcdBin) == "" { + te.ControlPlane.Etcd.Path = defaultAssetPath("etcd") + } + if os.Getenv(envKubectlBin) == "" { + // we can't just set the path manually (it's behind a function), so set the environment variable instead + if err := os.Setenv(envKubectlBin, defaultAssetPath("kubectl")); err != nil { + return nil, err + } + } + + if err := te.defaultTimeouts(); err != nil { + return nil, fmt.Errorf("failed to default controlplane timeouts: %v", err) + } + te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout + te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout + te.ControlPlane.APIServer.StartTimeout = te.ControlPlaneStartTimeout + te.ControlPlane.APIServer.StopTimeout = te.ControlPlaneStopTimeout + + log.V(1).Info("starting control plane", "api server flags", te.ControlPlane.APIServer.Args) + if err := te.startControlPlane(); err != nil { + return nil, err + } + + // Create the *rest.Config for creating new clients + te.Config = &rest.Config{ + Host: te.ControlPlane.APIURL().Host, + // gotta go fast during tests -- we don't really care about overwhelming our test API server + QPS: 1000.0, + Burst: 2000.0, + } + } + + log.V(1).Info("installing CRDs") + _, err := InstallCRDs(te.Config, CRDInstallOptions{ + Paths: te.CRDDirectoryPaths, + CRDs: te.CRDs, + }) + return te.Config, err +} + +func (te *Environment) startControlPlane() error { + numTries, maxRetries := 0, 5 + var err error + for ; numTries < maxRetries; numTries++ { + // Start the control plane - retry if it fails + err = te.ControlPlane.Start() + if err == nil { + break + } + log.Error(err, "unable to start the controlplane", "tries", numTries) + } + if numTries == maxRetries { + return fmt.Errorf("failed to start the controlplane. 
retried %d times: %v", numTries, err) + } + return nil +} + +func (te *Environment) defaultTimeouts() error { + var err error + if te.ControlPlaneStartTimeout == 0 { + if envVal := os.Getenv(envStartTimeout); envVal != "" { + te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStartTimeout = defaultKubebuilderControlPlaneStartTimeout + } + } + + if te.ControlPlaneStopTimeout == 0 { + if envVal := os.Getenv(envStopTimeout); envVal != "" { + te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStopTimeout = defaultKubebuilderControlPlaneStopTimeout + } + } + return nil +} + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + return *te.UseExistingCluster +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go index ec9fe1d0c1..adba3bbc16 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go @@ -18,8 +18,11 @@ limitations under the License. Package event contains the definitions for the Event types produced by source.Sources and transformed into reconcile.Requests by handler.EventHandler. -The details of how events are produced and transformed into reconcile.Requests are not something most -users should need to use or understand. Instead of working with Events, users should use -source.Sources and handler.EventHandlers with Controller.Watch. +You should rarely need to work with these directly -- instead, use Controller.Watch with +source.Sources and handler.EventHandlers. + +Events generally contain both a full runtime.Object that caused the event, as well +as a direct handle to that object's metadata. This saves a lot of typecasting in +code that works with Events. */ package event diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go index 4874e2e875..6aa50bf301 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go @@ -17,7 +17,7 @@ limitations under the License. package event import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -25,7 +25,7 @@ import ( // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. type CreateEvent struct { // Meta is the ObjectMeta of the Kubernetes Type that was created - Meta v1.Object + Meta metav1.Object // Object is the object from the event Object runtime.Object @@ -35,13 +35,13 @@ type CreateEvent struct { // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. type UpdateEvent struct { // MetaOld is the ObjectMeta of the Kubernetes Type that was updated (before the update) - MetaOld v1.Object + MetaOld metav1.Object // ObjectOld is the object from the event ObjectOld runtime.Object // MetaNew is the ObjectMeta of the Kubernetes Type that was updated (after the update) - MetaNew v1.Object + MetaNew metav1.Object // ObjectNew is the object from the event ObjectNew runtime.Object @@ -51,7 +51,7 @@ type UpdateEvent struct { // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. 
type DeleteEvent struct { // Meta is the ObjectMeta of the Kubernetes Type that was deleted - Meta v1.Object + Meta metav1.Object // Object is the object from the event Object runtime.Object @@ -66,7 +66,7 @@ type DeleteEvent struct { // handler.EventHandler. type GenericEvent struct { // Meta is the ObjectMeta of a Kubernetes Type this event is for - Meta v1.Object + Meta metav1.Object // Object is the object from the event Object runtime.Object diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go index 3167e3526f..3b5b79048d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go @@ -19,6 +19,8 @@ Package handler defines EventHandlers that enqueue reconcile.Requests in respons observed from Watching Kubernetes APIs. Users should provide a source.Source and handler.EventHandler to Controller.Watch in order to generate and enqueue reconcile.Request work items. +Generally, following premade event handlers should be sufficient for most use cases: + EventHandlers EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will @@ -29,7 +31,7 @@ EnqueueRequestForOwner - Enqueues a reconcile.Request containing the Name and Na This will cause owner of the object that was the source of the Event (e.g. the owner object that created the object) to be reconciled. -EnqueueRequestsFromMapFunc - Enqueues Reconciler.Requests resulting from a user provided transformation function run against the +EnqueueRequestsFromMapFunc - Enqueues reconcile.Requests resulting from a user provided transformation function run against the object in the Event. This will cause an arbitrary collection of objects (defined from a transformation of the source object) to be reconciled. */ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index 9ddda5be6e..2464c8b671 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -20,11 +20,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" ) -var enqueueLog = logf.KBLog.WithName("eventhandler").WithName("EnqueueRequestForObject") +var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject") var _ EventHandler = &EnqueueRequestForObject{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 8e4a5a6acb..a60790242c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) var _ EventHandler = &EnqueueRequestsFromMapFunc{} @@ -68,6 +69,16 @@ func (e *EnqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInter } } +// EnqueueRequestsFromMapFunc can inject fields into the mapper. + +// InjectFunc implements inject.Injector. 
+func (e *EnqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { + if f == nil { + return nil + } + return f(e.ToRequests) +} + // Mapper maps an object to a collection of keys to be enqueued type Mapper interface { // Map maps an object diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index a5b1076808..17d512696c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -19,20 +19,21 @@ package handler import ( "fmt" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" ) var _ EventHandler = &EnqueueRequestForOwner{} -var log = logf.KBLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") +var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created // the object that was the source of the Event. @@ -51,6 +52,9 @@ type EnqueueRequestForOwner struct { // groupKind is the cached Group and Kind from OwnerType groupKind schema.GroupKind + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper } // Create implements EventHandler @@ -126,10 +130,21 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object) // object in the event. if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group { // Match found - add a Request for the object referred to in the OwnerReference - result = append(result, reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: object.GetNamespace(), - Name: ref.Name, - }}) + request := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: ref.Name, + }} + + // if owner is not namespaced then we should set the namespace to the empty + mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) + if err != nil { + log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) + return nil + } + if mapping.Scope.Name() != meta.RESTScopeNameRoot { + request.Namespace = object.GetNamespace() + } + + result = append(result, request) } } @@ -163,3 +178,11 @@ var _ inject.Scheme = &EnqueueRequestForOwner{} func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { return e.parseOwnerTypeGroupKind(s) } + +var _ inject.Mapper = &EnqueueRequestForOwner{} + +// InjectMapper is called by the Controller to provide the rest mapper used by the manager. 
+func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { + e.mapper = m + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f97bdd7cda..7f40a32a52 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -31,14 +31,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/source" ) -var log = logf.KBLog.WithName("controller") +var log = logf.RuntimeLog.WithName("controller") var _ inject.Injector = &Controller{} @@ -154,10 +154,7 @@ func (c *Controller) Start(stop <-chan struct{}) error { log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles) for i := 0; i < c.MaxConcurrentReconciles; i++ { // Process work items - go wait.Until(func() { - for c.processNextWorkItem() { - } - }, c.JitterPeriod, stop) + go wait.Until(c.worker, c.JitterPeriod, stop) } c.Started = true @@ -168,23 +165,17 @@ func (c *Controller) Start(stop <-chan struct{}) error { return nil } +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the reconcileHandler is never invoked concurrently with the same object. +func (c *Controller) worker() { + for c.processNextWorkItem() { + } +} + // processNextWorkItem will read a single work item off the workqueue and -// attempt to process it, by calling the syncHandler. +// attempt to process it, by calling the reconcileHandler. func (c *Controller) processNextWorkItem() bool { - // This code copy-pasted from the sample-Controller. - - // Update metrics after processing each item - reconcileStartTS := time.Now() - defer func() { - c.updateMetrics(time.Now().Sub(reconcileStartTS)) - }() - obj, shutdown := c.Queue.Get() - if obj == nil { - // Sometimes the Queue gives us nil items when it starts up - c.Queue.Forget(obj) - } - if shutdown { // Stop working return false @@ -197,6 +188,17 @@ func (c *Controller) processNextWorkItem() bool { // put back on the workqueue and attempted again after a back-off // period. defer c.Queue.Done(obj) + + return c.reconcileHandler(obj) +} + +func (c *Controller) reconcileHandler(obj interface{}) bool { + // Update metrics after processing each item + reconcileStartTS := time.Now() + defer func() { + c.updateMetrics(time.Now().Sub(reconcileStartTS)) + }() + var req reconcile.Request var ok bool if req, ok = obj.(reconcile.Request); !ok { @@ -209,7 +211,6 @@ func (c *Controller) processNextWorkItem() bool { // Return true, don't take a break return true } - // RunInformersAndControllers the syncHandler, passing it the namespace/Name string of the // resource to be synced. 
if result, err := c.Do.Reconcile(req); err != nil { @@ -219,6 +220,11 @@ func (c *Controller) processNextWorkItem() bool { ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "error").Inc() return false } else if result.RequeueAfter > 0 { + // The result.RequeueAfter request will be lost, if it is returned + // along with a non-nil error. But this is intended as + // We need to drive to stable reconcile loops before queuing due + // to result.RequestAfter + c.Queue.Forget(obj) c.Queue.AddAfter(req, result.RequeueAfter) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue_after").Inc() return true diff --git a/vendor/k8s.io/component-base/cli/flag/noop.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go similarity index 60% rename from vendor/k8s.io/component-base/cli/flag/noop.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go index 03f7f14c0b..0d671b73d6 100644 --- a/vendor/k8s.io/component-base/cli/flag/noop.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go @@ -14,28 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package flag +// Package log contains utilities for fetching a new logger +// when one is not already available. +// Deprecated: use pkg/log +package log import ( - goflag "flag" - "github.com/spf13/pflag" -) - -// NoOp implements goflag.Value and plfag.Value, -// but has a noop Set implementation -type NoOp struct{} + "github.com/go-logr/logr" -var _ goflag.Value = NoOp{} -var _ pflag.Value = NoOp{} - -func (NoOp) String() string { - return "" -} + "sigs.k8s.io/controller-runtime/pkg/log" +) -func (NoOp) Set(val string) error { - return nil -} +var ( + // RuntimeLog is a base parent logger for use inside controller-runtime. + RuntimeLog logr.Logger +) -func (NoOp) Type() string { - return "NoOp" +func init() { + RuntimeLog = log.Log.WithName("controller-runtime") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go index 3f0f52a9bd..24654faa3c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go @@ -15,6 +15,10 @@ limitations under the License. */ /* -Package leaderelection contains a constructors for a leader election resource lock +Package leaderelection contains a constructors for a leader election resource lock. +This is used to ensure that multiple copies of a controller manager can be run with +only one active set of controllers, for active-passive HA. + +It uses built-in Kubernetes leader election APIs. 
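A minimal sketch, not taken from this patch, of how a controller manager entrypoint might opt in to the leader election described above (and to the relocated pkg/log and zap helpers that appear further down); the election ID and namespace are illustrative, and LeaseDuration merely restates the 15s default for demonstration:

package main

import (
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

func main() {
	log.SetLogger(zap.Logger(true)) // development-mode zap backend

	leaseDuration := 15 * time.Second

	// With LeaderElection enabled, only the replica holding the lock starts
	// the leader-election runnables; the others block until they acquire it.
	mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{
		LeaderElection:          true,
		LeaderElectionID:        "controller-leader-election-capo", // illustrative
		LeaderElectionNamespace: "capo-system",                     // illustrative
		LeaseDuration:           &leaseDuration,
	})
	if err != nil {
		panic(err) // a real entrypoint would log the error and exit
	}
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		panic(err)
	}
}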
*/ package leaderelection diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go index fb8019abe1..b3003ea5e3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -84,6 +84,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op options.LeaderElectionNamespace, options.LeaderElectionID, client.CoreV1(), + client.CoordinationV1(), resourcelock.ResourceLockConfig{ Identity: id, EventRecorder: recorderProvider.GetEventRecorderFor(id), @@ -100,7 +101,7 @@ func getInClusterNamespace() (string, error) { return "", fmt.Errorf("error checking namespace file: %v", err) } - // Load the namespace file and return itss content + // Load the namespace file and return its content namespace, err := ioutil.ReadFile(inClusterNamespacePath) if err != nil { return "", fmt.Errorf("error reading namespace file: %v", err) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go similarity index 90% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/deleg.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index cb711696de..949117f6bb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -17,6 +17,8 @@ limitations under the License. package log import ( + "sync" + "github.com/go-logr/logr" ) @@ -25,6 +27,7 @@ import ( type loggerPromise struct { logger *DelegatingLogger childPromises []*loggerPromise + promisesLock sync.Mutex name *string tags []interface{} @@ -33,9 +36,13 @@ type loggerPromise struct { // WithName provides a new Logger with the name appended func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromise { res := &loggerPromise{ - logger: l, - name: &name, + logger: l, + name: &name, + promisesLock: sync.Mutex{}, } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() p.childPromises = append(p.childPromises, res) return res } @@ -43,9 +50,13 @@ func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromis // WithValues provides a new Logger with the tags appended func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise { res := &loggerPromise{ - logger: l, - tags: tags, + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() p.childPromises = append(p.childPromises, res) return res } @@ -119,7 +130,7 @@ func (l *DelegatingLogger) Fulfill(actual logr.Logger) { func NewDelegatingLogger(initial logr.Logger) *DelegatingLogger { l := &DelegatingLogger{ Logger: initial, - promise: &loggerPromise{}, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, } l.promise.logger = l return l diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go new file mode 100644 index 0000000000..128e6542ea --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -0,0 +1,48 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log contains utilities for fetching a new logger +// when one is not already available. +// +// The Log Handle +// +// This package contains a root logr.Logger Log. It may be used to +// get a handle to whatever the root logging implementation is. By +// default, no implementation exists, and the handle returns "promises" +// to loggers. When the implementation is set using SetLogger, these +// "promises" will be converted over to real loggers. +// +// Logr +// +// All logging in controller-runtime is structured, using a set of interfaces +// defined by a package called logr +// (https://godoc.org/github.com/go-logr/logr). The sub-package zap provides +// helpers for setting up logr backed by Zap (go.uber.org/zap). +package log + +import ( + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + Log.Fulfill(l) +} + +// Log is the base logger used by kubebuilder. It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. +var Log = NewDelegatingLogger(NullLogger{}) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go similarity index 100% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/null.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/kube_helpers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go similarity index 96% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/kube_helpers.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go index f07d1e6008..2c0d88386d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/kube_helpers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/kube_helpers.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package log contains utilities for fetching a new logger -// when one is not already available. -package log +package zap import ( "fmt" diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go similarity index 64% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go index 4a50601005..5812e85bfb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/zap/zap.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package log contains utilities for fetching a new logger -// when one is not already available. -package log +// Package zap contains helpers for setting up a new logr.Logger instance +// using the Zap logging framework. +package zap import ( "io" @@ -29,24 +29,29 @@ import ( "go.uber.org/zap/zapcore" ) -// ZapLogger is a Logger implementation. +// Logger is a Logger implementation. // If development is true, a Zap development config will be used // (stacktraces on warnings, no sampling), otherwise a Zap production // config will be used (stacktraces on errors, sampling). -func ZapLogger(development bool) logr.Logger { - return ZapLoggerTo(os.Stderr, development) +func Logger(development bool) logr.Logger { + return LoggerTo(os.Stderr, development) } -// ZapLoggerTo returns a new Logger implementation using Zap which logs -// to the given destination, instead of stderr. It otherise behaves like +// LoggerTo returns a new Logger implementation using Zap which logs +// to the given destination, instead of stderr. It otherwise behaves like // ZapLogger. -func ZapLoggerTo(destWriter io.Writer, development bool) logr.Logger { +func LoggerTo(destWriter io.Writer, development bool) logr.Logger { + return zapr.NewLogger(RawLoggerTo(destWriter, development)) +} + +// RawLoggerTo returns a new zap.Logger configured with KubeAwareEncoder +// which logs to a given destination +func RawLoggerTo(destWriter io.Writer, development bool, opts ...zap.Option) *zap.Logger { // this basically mimics NewConfig, but with a custom sink sink := zapcore.AddSync(destWriter) var enc zapcore.Encoder var lvl zap.AtomicLevel - var opts []zap.Option if development { encCfg := zap.NewDevelopmentEncoderConfig() enc = zapcore.NewConsoleEncoder(encCfg) @@ -64,22 +69,5 @@ func ZapLoggerTo(destWriter io.Writer, development bool) logr.Logger { opts = append(opts, zap.AddCallerSkip(1), zap.ErrorOutput(sink)) log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: enc, Verbose: development}, sink, lvl)) log = log.WithOptions(opts...) - return zapr.NewLogger(log) -} - -// SetLogger sets a concrete logging implementation for all deferred Loggers. -func SetLogger(l logr.Logger) { - Log.Fulfill(l) -} - -// Log is the base logger used by kubebuilder. It delegates -// to another logr.Logger. You *must* call SetLogger to -// get any actual logging. -var Log = NewDelegatingLogger(NullLogger{}) - -// KBLog is a base parent logger. 
-var KBLog logr.Logger - -func init() { - KBLog = Log.WithName("kubebuilder") + return log } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index fd04e8fb54..ed4ae806e3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -33,14 +33,21 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) -var log = logf.KBLog.WithName("manager") +const ( + // Values taken from: https://github.com/kubernetes/apiserver/blob/master/pkg/apis/config/v1alpha1/defaults.go + defaultLeaseDuration = 15 * time.Second + defaultRenewDeadline = 10 * time.Second + defaultRetryPeriod = 2 * time.Second +) + +var log = logf.RuntimeLog.WithName("manager") type controllerManager struct { // config is the rest.config used to talk to the apiserver. Required. @@ -49,11 +56,13 @@ type controllerManager struct { // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults // to scheme.scheme. scheme *runtime.Scheme - // admissionDecoder is used to decode an admission.Request. - admissionDecoder types.Decoder - // runnables is the set of Controllers that the controllerManager injects deps into and Starts. - runnables []Runnable + // leaderElectionRunnables is the set of Controllers that the controllerManager injects deps into and Starts. + // These Runnables are managed by lead election. + leaderElectionRunnables []Runnable + // nonLeaderElectionRunnables is the set of webhook servers that the controllerManager injects deps into and Starts. + // These Runnables will not be blocked by lead election. + nonLeaderElectionRunnables []Runnable cache cache.Cache @@ -61,6 +70,9 @@ type controllerManager struct { // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). client client.Client + // apiReader is the reader that will make requests to the api server and not the cache. + apiReader client.Reader + // fieldIndexes knows how to add field indexes over the Cache used by this controller, // which can later be consumed via field selectors from the injected client. fieldIndexes client.FieldIndexer @@ -78,9 +90,10 @@ type controllerManager struct { // metricsListener is used to serve prometheus metrics metricsListener net.Listener - mu sync.Mutex - started bool - errChan chan error + mu sync.Mutex + started bool + startedLeader bool + errChan chan error // internalStop is the stop channel *actually* used by everything involved // with the manager as a stop channel, so that we can pass a stop channel @@ -93,9 +106,26 @@ type controllerManager struct { internalStopper chan<- struct{} startCache func(stop <-chan struct{}) error + + // port is the port that the webhook server serves at. + port int + // host is the hostname that the webhook server binds to. + host string + + webhookServer *webhook.Server + + // leaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. 
+ leaseDuration time.Duration + // renewDeadline is the duration that the acting master will retry + // refreshing leadership before giving up. + renewDeadline time.Duration + // retryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. + retryPeriod time.Duration } -// Add sets dependencies on i, and adds it to the list of runnables to start. +// Add sets dependencies on i, and adds it to the list of Runnables to start. func (cm *controllerManager) Add(r Runnable) error { cm.mu.Lock() defer cm.mu.Unlock() @@ -105,9 +135,18 @@ func (cm *controllerManager) Add(r Runnable) error { return err } - // Add the runnable to the list - cm.runnables = append(cm.runnables, r) - if cm.started { + var shouldStart bool + + // Add the runnable to the leader election or the non-leaderelection list + if leRunnable, ok := r.(LeaderElectionRunnable); ok && !leRunnable.NeedLeaderElection() { + shouldStart = cm.started + cm.nonLeaderElectionRunnables = append(cm.nonLeaderElectionRunnables, r) + } else { + shouldStart = cm.startedLeader + cm.leaderElectionRunnables = append(cm.leaderElectionRunnables, r) + } + + if shouldStart { // If already started, start the controller go func() { cm.errChan <- r.Start(cm.internalStop) @@ -124,6 +163,9 @@ func (cm *controllerManager) SetFields(i interface{}) error { if _, err := inject.ClientInto(cm.client, i); err != nil { return err } + if _, err := inject.APIReaderInto(cm.apiReader, i); err != nil { + return err + } if _, err := inject.SchemeInto(cm.scheme, i); err != nil { return err } @@ -136,7 +178,7 @@ func (cm *controllerManager) SetFields(i interface{}) error { if _, err := inject.StopChannelInto(cm.internalStop, i); err != nil { return err } - if _, err := inject.DecoderInto(cm.admissionDecoder, i); err != nil { + if _, err := inject.MapperInto(cm.mapper, i); err != nil { return err } return nil @@ -154,10 +196,6 @@ func (cm *controllerManager) GetScheme() *runtime.Scheme { return cm.scheme } -func (cm *controllerManager) GetAdmissionDecoder() types.Decoder { - return cm.admissionDecoder -} - func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer { return cm.fieldIndexes } @@ -166,7 +204,7 @@ func (cm *controllerManager) GetCache() cache.Cache { return cm.cache } -func (cm *controllerManager) GetRecorder(name string) record.EventRecorder { +func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder { return cm.recorderProvider.GetEventRecorderFor(name) } @@ -174,18 +212,37 @@ func (cm *controllerManager) GetRESTMapper() meta.RESTMapper { return cm.mapper } +func (cm *controllerManager) GetAPIReader() client.Reader { + return cm.apiReader +} + +func (cm *controllerManager) GetWebhookServer() *webhook.Server { + if cm.webhookServer == nil { + cm.webhookServer = &webhook.Server{ + Port: cm.port, + Host: cm.host, + } + if err := cm.Add(cm.webhookServer); err != nil { + panic("unable to add webhookServer to the controller manager") + } + } + return cm.webhookServer +} + func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { + var metricsPath = "/metrics" handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics mux := http.NewServeMux() - mux.Handle("/metrics", handler) + mux.Handle(metricsPath, handler) server := http.Server{ Handler: mux, } // Run the server go func() { + log.Info("starting metrics server", "path", metricsPath) if err := 
server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed { cm.errChan <- err } @@ -211,13 +268,15 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { go cm.serveMetrics(cm.internalStop) } + go cm.startNonLeaderElectionRunnables() + if cm.resourceLock != nil { err := cm.startLeaderElection() if err != nil { return err } } else { - go cm.start() + go cm.startLeaderElectionRunnables() } select { @@ -230,10 +289,47 @@ func (cm *controllerManager) Start(stop <-chan struct{}) error { } } -func (cm *controllerManager) start() { +func (cm *controllerManager) startNonLeaderElectionRunnables() { cm.mu.Lock() defer cm.mu.Unlock() + cm.waitForCache() + + // Start the non-leaderelection Runnables after the cache has synced + for _, c := range cm.nonLeaderElectionRunnables { + // Controllers block, but we want to return an error if any have an error starting. + // Write any Start errors to a channel so we can return them + ctrl := c + go func() { + cm.errChan <- ctrl.Start(cm.internalStop) + }() + } +} + +func (cm *controllerManager) startLeaderElectionRunnables() { + cm.mu.Lock() + defer cm.mu.Unlock() + + cm.waitForCache() + + // Start the leader election Runnables after the cache has synced + for _, c := range cm.leaderElectionRunnables { + // Controllers block, but we want to return an error if any have an error starting. + // Write any Start errors to a channel so we can return them + ctrl := c + go func() { + cm.errChan <- ctrl.Start(cm.internalStop) + }() + } + + cm.startedLeader = true +} + +func (cm *controllerManager) waitForCache() { + if cm.started { + return + } + // Start the Cache. Allow the function to start the cache to be mocked out for testing if cm.startCache == nil { cm.startCache = cm.cache.Start @@ -247,31 +343,18 @@ func (cm *controllerManager) start() { // Wait for the caches to sync. // TODO(community): Check the return value and write a test cm.cache.WaitForCacheSync(cm.internalStop) - - // Start the runnables after the cache has synced - for _, c := range cm.runnables { - // Controllers block, but we want to return an error if any have an error starting. - // Write any Start errors to a channel so we can return them - ctrl := c - go func() { - cm.errChan <- ctrl.Start(cm.internalStop) - }() - } - cm.started = true } func (cm *controllerManager) startLeaderElection() (err error) { l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ - Lock: cm.resourceLock, - // Values taken from: https://github.com/kubernetes/apiserver/blob/master/pkg/apis/config/v1alpha1/defaults.go - // TODO(joelspeed): These timings should be configurable - LeaseDuration: 15 * time.Second, - RenewDeadline: 10 * time.Second, - RetryPeriod: 2 * time.Second, + Lock: cm.resourceLock, + LeaseDuration: cm.leaseDuration, + RenewDeadline: cm.renewDeadline, + RetryPeriod: cm.retryPeriod, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(_ context.Context) { - cm.start() + cm.startLeaderElectionRunnables() }, OnStoppedLeading: func() { // Most implementations of leader election log.Fatal() here. 
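A brief sketch of when the uncached reader mentioned above can be preferable to the regular client, for example reading a credentials Secret once at startup without requiring the cache to list/watch Secrets; the namespace and name are illustrative only:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"

	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// readCloudsSecret fetches a secret straight from the API server, bypassing
// the manager's cache entirely.
func readCloudsSecret(ctx context.Context, mgr manager.Manager) (*corev1.Secret, error) {
	secret := &corev1.Secret{}
	key := types.NamespacedName{Namespace: "capo-system", Name: "cloud-config"} // illustrative
	if err := mgr.GetAPIReader().Get(ctx, key, secret); err != nil {
		return nil, err
	}
	return secret, nil
}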
@@ -285,7 +368,16 @@ func (cm *controllerManager) startLeaderElection() (err error) { return err } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + select { + case <-cm.internalStop: + cancel() + case <-ctx.Done(): + } + }() + // Start the leader elector process - go l.Run(context.Background()) + go l.Run(ctx) return nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 8d346df6c1..56a34cba6e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -36,16 +36,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/leaderelection" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) // Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. // A Manager is required to create Controllers. type Manager interface { - // Add will set reqeusted dependencies on the component, and cause the component to be + // Add will set requested dependencies on the component, and cause the component to be // started when Start is called. Add will inject any dependencies for which the argument - // implements the inject interface - e.g. inject.Client + // implements the inject interface - e.g. inject.Client. + // Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either + // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). Add(Runnable) error // SetFields will set any dependencies on an object for which the object has implemented the inject @@ -59,13 +60,13 @@ type Manager interface { // GetConfig returns an initialized Config GetConfig() *rest.Config - // GetScheme returns and initialized Scheme + // GetScheme returns an initialized Scheme GetScheme() *runtime.Scheme - // GetAdmissionDecoder returns the runtime.Decoder based on the scheme. - GetAdmissionDecoder() types.Decoder - - // GetClient returns a client configured with the Config + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. GetClient() client.Client // GetFieldIndexer returns a client.FieldIndexer configured with the client @@ -74,17 +75,26 @@ type Manager interface { // GetCache returns a cache.Cache GetCache() cache.Cache - // GetRecorder returns a new EventRecorder for the provided name - GetRecorder(name string) record.EventRecorder + // GetEventRecorderFor returns a new EventRecorder for the provided name + GetEventRecorderFor(name string) record.EventRecorder // GetRESTMapper returns a RESTMapper GetRESTMapper() meta.RESTMapper + + // GetAPIReader returns a reader that will be configured to use the API server. + // This should be used sparingly and only when the client does not fit your + // use case. 
+ GetAPIReader() client.Reader + + // GetWebhookServer returns a webhook.Server + GetWebhookServer() *webhook.Server } // Options are the arguments for creating a new Manager type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources - // Defaults to the kubernetes/client-go scheme.Scheme + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. Scheme *runtime.Scheme // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs @@ -108,21 +118,42 @@ type Options struct { // will use for holding the leader lock. LeaderElectionID string - // Namespace if specified restricts the manager's cache to watch objects in the desired namespace - // Defaults to all namespaces - // Note: If a namespace is specified then controllers can still Watch for a cluster-scoped resource e.g Node - // For namespaced resources the cache will only hold objects from the desired namespace. + // LeaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. This is measured against time of + // last observed ack. Default is 15 seconds. + LeaseDuration *time.Duration + // RenewDeadline is the duration that the acting master will retry + // refreshing leadership before giving up. Default is 10 seconds. + RenewDeadline *time.Duration + // RetryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. Default is 2 seconds. + RetryPeriod *time.Duration + + // Namespace if specified restricts the manager's cache to watch objects in + // the desired namespace Defaults to all namespaces + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources the cache + // will only hold objects from the desired namespace. Namespace string // MetricsBindAddress is the TCP address that the controller should bind to - // for serving prometheus metrics + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. MetricsBindAddress string + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port. + Port int + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host. + Host string + // Functions to all for a user to customize the values that will be injected. // NewCache is the function that will create the cache to be used // by the manager. If not set this will use the default new cache function. - NewCache NewCacheFunc + NewCache cache.NewCacheFunc // NewClient will create the client to be used by the manager. 
// If not set this will create the default DelegatingClient that will @@ -132,24 +163,25 @@ type Options struct { // Dependency injection for testing newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger) (recorder.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) - newAdmissionDecoder func(scheme *runtime.Scheme) (types.Decoder, error) newMetricsListener func(addr string) (net.Listener, error) } -// NewCacheFunc allows a user to define how to create a cache -type NewCacheFunc func(config *rest.Config, opts cache.Options) (cache.Cache, error) - // NewClientFunc allows a user to define how to create a client type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) // Runnable allows a component to be started. +// It's very important that Start blocks until +// it's done running. type Runnable interface { - // Start starts running the component. The component will stop running when the channel is closed. - // Start blocks until the channel is closed or an error occurs. + // Start starts running the component. The component will stop running + // when the channel is closed. Start blocks until the channel is closed or + // an error occurs. Start(<-chan struct{}) error } -// RunnableFunc implements Runnable +// RunnableFunc implements Runnable using a function. +// It's very important that the given function block +// until it's done running. type RunnableFunc func(<-chan struct{}) error // Start implements Runnable @@ -157,6 +189,13 @@ func (r RunnableFunc) Start(s <-chan struct{}) error { return r(s) } +// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode. +type LeaderElectionRunnable interface { + // NeedLeaderElection returns true if the Runnable needs to be run in the leader election mode. + // e.g. controllers need to be run in leader election mode, while webhook server doesn't. + NeedLeaderElection() bool +} + // New returns a new Manager for creating Controllers. func New(config *rest.Config, options Options) (Manager, error) { // Initialize a rest.config if none was specified @@ -180,6 +219,11 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + apiReader, err := client.New(config, client.Options{Scheme: options.Scheme, Mapper: mapper}) + if err != nil { + return nil, err + } + writeObj, err := options.NewClient(cache, config, client.Options{Scheme: options.Scheme, Mapper: mapper}) if err != nil { return nil, err @@ -202,12 +246,7 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } - admissionDecoder, err := options.newAdmissionDecoder(options.Scheme) - if err != nil { - return nil, err - } - - // Create the mertics listener. This will throw an error if the metrics bind + // Create the metrics listener. This will throw an error if the metrics bind // address is invalid or already in use. 
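To illustrate the LeaderElectionRunnable split introduced above, a small sketch (names are illustrative, not from this patch) of a Runnable that should run on every replica rather than only on the elected leader, e.g. a trivial debug endpoint:

package runnables

import "net/http"

// AlwaysOn serves a trivial HTTP endpoint and opts out of leader election.
type AlwaysOn struct {
	Addr string
}

// Start implements manager.Runnable; it blocks until the stop channel closes.
func (a *AlwaysOn) Start(stop <-chan struct{}) error {
	srv := &http.Server{Addr: a.Addr, Handler: http.NotFoundHandler()}
	go func() {
		<-stop
		srv.Close() // unblocks ListenAndServe below
	}()
	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
		return err
	}
	return nil
}

// NeedLeaderElection implements manager.LeaderElectionRunnable; returning
// false keeps this runnable out of the leader-election gated set, so the
// manager starts it as soon as the caches have synced.
func (a *AlwaysOn) NeedLeaderElection() bool { return false }

Registering it is then just mgr.Add(&AlwaysOn{Addr: ":8081"}).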
metricsListener, err := options.newMetricsListener(options.MetricsBindAddress) if err != nil { @@ -219,17 +258,22 @@ func New(config *rest.Config, options Options) (Manager, error) { return &controllerManager{ config: config, scheme: options.Scheme, - admissionDecoder: admissionDecoder, errChan: make(chan error), cache: cache, fieldIndexes: cache, client: writeObj, + apiReader: apiReader, recorderProvider: recorderProvider, resourceLock: resourceLock, mapper: mapper, metricsListener: metricsListener, internalStop: stop, internalStopper: stop, + port: options.Port, + host: options.Host, + leaseDuration: *options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, }, nil } @@ -282,13 +326,21 @@ func setOptionsDefaults(options Options) Options { options.newResourceLock = leaderelection.NewResourceLock } - if options.newAdmissionDecoder == nil { - options.newAdmissionDecoder = admission.NewDecoder - } - if options.newMetricsListener == nil { options.newMetricsListener = metrics.NewListener } + leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod + if options.LeaseDuration == nil { + options.LeaseDuration = &leaseDuration + } + + if options.RenewDeadline == nil { + options.RenewDeadline = &renewDeadline + } + + if options.RetryPeriod == nil { + options.RetryPeriod = &retryPeriod + } return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go similarity index 78% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/doc.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go index 749321e7a1..737cc7eff2 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go @@ -14,5 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package signals contains libraries for handling signals to shutdown the system. +// Package signals contains libraries for handling signals to gracefully +// shutdown the manager in combination with Kubernetes pod graceful termination +// policy. package signals diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go similarity index 92% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go index 6bddfddb4f..08eaef7b42 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go @@ -23,7 +23,7 @@ import ( var onlyOneSignalHandler = make(chan struct{}) -// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned +// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned // which is closed on one of these signals. If a second signal is caught, the program // is terminated with exit code 1. 
func SetupSignalHandler() (stopCh <-chan struct{}) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal_posix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go similarity index 100% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal_posix.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal_windows.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go similarity index 100% rename from vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal_windows.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index d02c56356f..505e5ff891 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -23,7 +23,6 @@ import ( "github.com/prometheus/client_golang/prometheus" reflectormetrics "k8s.io/client-go/tools/cache" clientmetrics "k8s.io/client-go/tools/metrics" - workqueuemetrics "k8s.io/client-go/util/workqueue" ) // this file contains setup logic to initialize the myriad of places @@ -104,64 +103,11 @@ var ( Name: "last_resource_version", Help: "Last resource version seen for the reflectors", }, []string{"name"}) - - // workqueue metrics - - workQueueSubsystem = "workqueue" - - depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: "depth", - Help: "Current depth of workqueue", - }, []string{"name"}) - - adds = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: workQueueSubsystem, - Name: "adds_total", - Help: "Total number of adds handled by workqueue", - }, []string{"name"}) - - latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: workQueueSubsystem, - Name: "queue_latency_seconds", - Help: "How long in seconds an item stays in workqueue before being requested.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), - }, []string{"name"}) - - workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: workQueueSubsystem, - Name: "work_duration_seconds", - Help: "How long in seconds processing an item from workqueue takes.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), - }, []string{"name"}) - - retries = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: workQueueSubsystem, - Name: "retries_total", - Help: "Total number of retries handled by workqueue", - }, []string{"name"}) - - longestRunning = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: "longest_running_processor_microseconds", - Help: "How many microseconds has the longest running " + - "processor for workqueue been running.", - }, []string{"name"}) - - unfinishedWork = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: "unfinished_work_seconds", - Help: "How many seconds of work has done that " + - "is in progress and hasn't been observed by work_duration. Large " + - "values indicate stuck threads. 
One can deduce the number of stuck " + - "threads by observing the rate at which this increases.", - }, []string{"name"}) ) func init() { registerClientMetrics() registerReflectorMetrics() - registerWorkqueueMetrics() } // registerClientMetrics sets up the client latency metrics from client-go @@ -174,7 +120,7 @@ func registerClientMetrics() { clientmetrics.Register(&latencyAdapter{metric: requestLatency}, &resultAdapter{metric: requestResult}) } -// registerReflectorMetrics sets up reflector (reconile) loop metrics +// registerReflectorMetrics sets up reflector (reconcile) loop metrics func registerReflectorMetrics() { Registry.MustRegister(listsTotal) Registry.MustRegister(listsDuration) @@ -188,20 +134,7 @@ func registerReflectorMetrics() { reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{}) } -// registerWorkQueueMetrics sets up workqueue (other reconcile) metrics -func registerWorkqueueMetrics() { - Registry.MustRegister(depth) - Registry.MustRegister(adds) - Registry.MustRegister(latency) - Registry.MustRegister(workDuration) - Registry.MustRegister(retries) - Registry.MustRegister(longestRunning) - Registry.MustRegister(unfinishedWork) - - workqueuemetrics.SetProvider(workqueueMetricsProvider{}) -} - -// this section contains adapters, implementations, and other sundry organic, artisinally +// this section contains adapters, implementations, and other sundry organic, artisanally // hand-crafted syntax trees required to convince client-go that it actually wants to let // someone use its metrics. @@ -262,38 +195,3 @@ func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetr func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric { return lastResourceVersion.WithLabelValues(name) } - -// Workqueue metrics (method #3 for client-go metrics), -// copied (more-or-less directly) from k8s.io/kubernetes setup code -// (which isn't anywhere in an easily-importable place). 
-// TODO(directxman12): stop "cheating" and calling histograms summaries when we pull in the latest deps - -type workqueueMetricsProvider struct{} - -func (workqueueMetricsProvider) NewDepthMetric(name string) workqueuemetrics.GaugeMetric { - return depth.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewAddsMetric(name string) workqueuemetrics.CounterMetric { - return adds.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueuemetrics.SummaryMetric { - return latency.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueuemetrics.SummaryMetric { - return workDuration.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueuemetrics.CounterMetric { - return retries.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - return longestRunning.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - return unfinishedWork.WithLabelValues(name) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go index e9db3a110c..693bff8413 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go @@ -21,11 +21,9 @@ import ( "net" ) -// DefaultBindAddress sets the default bind address for the metrics -// listener -// The metrics is off by default. -// TODO: Flip the default by changing DefaultBindAddress back to ":8080" in the v0.2.0. -var DefaultBindAddress = "0" +// DefaultBindAddress sets the default bind address for the metrics listener +// The metrics is on by default. +var DefaultBindAddress = ":8080" // NewListener creates a new TCP listener bound to the given address. func NewListener(addr string) (net.Listener, error) { @@ -39,9 +37,12 @@ func NewListener(addr string) (net.Listener, error) { return nil, nil } + log.Info("metrics server is starting to listen", "addr", addr) ln, err := net.Listen("tcp", addr) if err != nil { - return nil, fmt.Errorf("error listening on %s: %v", addr, err) + er := fmt.Errorf("error listening on %s: %v", addr, err) + log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts") + return nil, er } return ln, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go new file mode 100644 index 0000000000..6381f0c14a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -0,0 +1,173 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") + +// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus +// which registers metrics to the default prometheus Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +func init() { + workqueue.SetProvider(workqueueMetricsProvider{}) +} + +func registerWorkqueueMetric(c prometheus.Collector, name, queue string) { + if err := Registry.Register(c); err != nil { + log.Error(err, "failed to register metric", "name", name, "queue", queue) + } +} + +type workqueueMetricsProvider struct{} + +func (workqueueMetricsProvider) NewDepthMetric(queue string) workqueue.GaugeMetric { + const name = "workqueue_depth" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "Current depth of workqueue", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewAddsMetric(queue string) workqueue.CounterMetric { + const name = "workqueue_adds_total" + m := prometheus.NewCounter(prometheus.CounterOpts{ + Name: name, + Help: "Total number of adds handled by workqueue", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewLatencyMetric(queue string) workqueue.HistogramMetric { + const name = "workqueue_queue_duration_seconds" + m := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: name, + Help: "How long in seconds an item stays in workqueue before being requested.", + ConstLabels: prometheus.Labels{"name": queue}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewWorkDurationMetric(queue string) workqueue.HistogramMetric { + const name = "workqueue_work_duration_seconds" + m := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: name, + Help: "How long in seconds processing an item from workqueue takes.", + ConstLabels: prometheus.Labels{"name": queue}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(queue string) workqueue.SettableGaugeMetric { + const name = "workqueue_unfinished_work_seconds" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(queue string) workqueue.SettableGaugeMetric { + const name = "workqueue_longest_running_processor_seconds" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +func (workqueueMetricsProvider) NewRetriesMetric(queue string) workqueue.CounterMetric { + const name = "workqueue_retries_total" + m := prometheus.NewCounter(prometheus.CounterOpts{ + Name: name, + Help: "Total number of retries handled by workqueue", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +// TODO(abursavich): Remove the following deprecated metrics when they are +// removed from k8s.io/client-go/util/workqueue. + +func (workqueueMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(queue string) workqueue.SettableGaugeMetric { + const name = "workqueue_longest_running_processor_microseconds" + m := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: name, + Help: "(Deprecated) How many microseconds has the longest running " + + "processor for workqueue been running.", + ConstLabels: prometheus.Labels{"name": queue}, + }) + registerWorkqueueMetric(m, name, queue) + return m +} + +// NOTE: The following deprecated metrics are noops because they were never +// included in controller-runtime. + +func (workqueueMetricsProvider) NewDeprecatedDepthMetric(queue string) workqueue.GaugeMetric { + return noopMetric{} +} + +func (workqueueMetricsProvider) NewDeprecatedAddsMetric(queue string) workqueue.CounterMetric { + return noopMetric{} +} + +func (workqueueMetricsProvider) NewDeprecatedLatencyMetric(queue string) workqueue.SummaryMetric { + return noopMetric{} +} + +func (workqueueMetricsProvider) NewDeprecatedWorkDurationMetric(queue string) workqueue.SummaryMetric { + return noopMetric{} +} + +func (workqueueMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(queue string) workqueue.SettableGaugeMetric { + return noopMetric{} +} + +func (workqueueMetricsProvider) NewDeprecatedRetriesMetric(queue string) workqueue.CounterMetric { + return noopMetric{} +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Set(float64) {} +func (noopMetric) Observe(float64) {} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/patch/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/patch/patch.go deleted file mode 100644 index 5a93fa08fe..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/patch/patch.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package patch - -import ( - "encoding/json" - "fmt" - "reflect" - - "github.com/appscode/jsonpatch" - - "k8s.io/apimachinery/pkg/runtime" -) - -// NewJSONPatch calculates the JSON patch between original and current objects. -func NewJSONPatch(original, current runtime.Object) ([]jsonpatch.JsonPatchOperation, error) { - originalGVK := original.GetObjectKind().GroupVersionKind() - currentGVK := current.GetObjectKind().GroupVersionKind() - if !reflect.DeepEqual(originalGVK, currentGVK) { - return nil, fmt.Errorf("GroupVersionKind %#v is expected to match %#v", originalGVK, currentGVK) - } - ori, err := json.Marshal(original) - if err != nil { - return nil, err - } - cur, err := json.Marshal(current) - if err != nil { - return nil, err - } - return jsonpatch.CreatePatch(ori, cur) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 99a4f9d93a..b5e8d014d3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -18,10 +18,10 @@ package predicate import ( "sigs.k8s.io/controller-runtime/pkg/event" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.KBLog.WithName("predicate").WithName("eventFilters") +var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") // Predicate filters events before enqueuing the keys. type Predicate interface { @@ -40,6 +40,7 @@ type Predicate interface { var _ Predicate = Funcs{} var _ Predicate = ResourceVersionChangedPredicate{} +var _ Predicate = GenerationChangedPredicate{} // Funcs is a function that implements Predicate. type Funcs struct { @@ -116,3 +117,47 @@ func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { } return true } + +// GenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is only incremented when the status subresource is enabled. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. 
+type GenerationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating generation change +func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.MetaOld == nil { + log.Error(nil, "Update event has no old metadata", "event", e) + return false + } + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old runtime object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new runtime object for update", "event", e) + return false + } + if e.MetaNew == nil { + log.Error(nil, "Update event has no new metadata", "event", e) + return false + } + if e.MetaNew.GetGeneration() == e.MetaOld.GetGeneration() { + return false + } + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index fec8e3020d..fbdec10870 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -28,6 +28,7 @@ type Result struct { Requeue bool // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. RequeueAfter time.Duration } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go index 2bdcb199f7..f093f0a726 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go @@ -14,6 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package recorder defines interfaces for working with Kubernetes event recorders. +// +// You can use these to emit Kubernetes events associated with a particular Kubernetes +// object. package recorder import ( diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go index da7f1da457..b47a91d042 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go @@ -17,11 +17,13 @@ limitations under the License. package inject import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" ) // Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and @@ -39,6 +41,20 @@ func CacheInto(c cache.Cache, i interface{}) (bool, error) { return false, nil } +// APIReader is used by the Manager to inject the APIReader into necessary types. +type APIReader interface { + InjectAPIReader(client.Reader) error +} + +// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. 
+// Returns false if i does not implement APIReader +func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { + if s, ok := i.(APIReader); ok { + return true, s.InjectAPIReader(reader) + } + return false, nil +} + // Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and // Reconciles type Config interface { @@ -69,20 +85,6 @@ func ClientInto(client client.Client, i interface{}) (bool, error) { return false, nil } -// Decoder is used by the ControllerManager to inject decoder into webhook handlers. -type Decoder interface { - InjectDecoder(types.Decoder) error -} - -// DecoderInto will set decoder on i and return the result if it implements Decoder. Returns -// false if i does not implement Decoder. -func DecoderInto(decoder types.Decoder, i interface{}) (bool, error) { - if s, ok := i.(Decoder); ok { - return true, s.InjectDecoder(decoder) - } - return false, nil -} - // Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and // Reconciles type Scheme interface { @@ -113,6 +115,20 @@ func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { return false, nil } +// Mapper is used to inject the rest mapper to components that may need it +type Mapper interface { + InjectMapper(meta.RESTMapper) error +} + +// MapperInto will set the rest mapper on i and return the result if it implements Mapper. +// Returns false if i does not implement Mapper. +func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { + if m, ok := i.(Mapper); ok { + return true, m.InjectMapper(mapper) + } + return false, nil +} + // Func injects dependencies into i. type Func func(i interface{}) error @@ -129,3 +145,18 @@ func InjectorInto(f Func, i interface{}) (bool, error) { } return false, nil } + +// Logger is used to inject Loggers into components that need them +// and don't otherwise have opinions. +type Logger interface { + InjectLogger(l logr.Logger) error +} + +// LoggerInto will set the logger on the given object if it implements inject.Logger, +// returning true if a InjectLogger was called, and false otherwise. +func LoggerInto(l logr.Logger, i interface{}) (bool, error) { + if injectable, wantsLogger := i.(Logger); wantsLogger { + return true, injectable.InjectLogger(l) + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go index 79868214e3..5b6c9465bf 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,43 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package scheme contains utilities for gradually building Schemes, +// which contain information associating Go types with Kubernetes +// groups, versions, and kinds. +// +// Deprecated: use pkg/scheme instead. package scheme import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" ) // Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. 
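Stepping back to the inject helpers added above (APIReaderInto, MapperInto, LoggerInto): a component opts into injection simply by implementing the matching interface, and the manager's field-setting machinery can then call the setter during setup. A hedged sketch, with exampleReconciler as a placeholder type:

    import (
        "github.com/go-logr/logr"

        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // exampleReconciler implements two of the optional inject interfaces.
    type exampleReconciler struct {
        apiReader client.Reader // reads go straight to the API server, bypassing the cache
        log       logr.Logger
    }

    // InjectAPIReader satisfies inject.APIReader.
    func (r *exampleReconciler) InjectAPIReader(reader client.Reader) error {
        r.apiReader = reader
        return nil
    }

    // InjectLogger satisfies inject.Logger.
    func (r *exampleReconciler) InjectLogger(l logr.Logger) error {
        r.log = l
        return nil
    }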
-type Builder struct { - GroupVersion schema.GroupVersion - runtime.SchemeBuilder -} - -// Register adds one or objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. -func (bld *Builder) Register(object ...runtime.Object) *Builder { - bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(bld.GroupVersion, object...) - metav1.AddToGroupVersion(scheme, bld.GroupVersion) - return nil - }) - return bld -} - -// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld. -func (bld *Builder) RegisterAll(b *Builder) *Builder { - bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...) - return bld -} - -// AddToScheme adds all registered types to s. -func (bld *Builder) AddToScheme(s *runtime.Scheme) error { - return bld.SchemeBuilder.AddToScheme(s) -} - -// Build returns a new Scheme containing the registered types. -func (bld *Builder) Build() (*runtime.Scheme, error) { - s := runtime.NewScheme() - return s, bld.AddToScheme(s) -} +type Builder = scheme.Builder diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go new file mode 100644 index 0000000000..cd60319674 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scheme contains utilities for gradually building Schemes, +// which contain information associating Go types with Kubernetes +// groups, versions, and kinds. +// +// Each API group should define a utility function +// called AddToScheme for adding its types to a Scheme: +// +// // in package myapigroupv1... +// var ( +// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} +// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// AddToScheme = SchemeBuilder.AddToScheme +// ) +// +// func init() { +// SchemeBuilder.Register(&MyType{}, &MyTypeList) +// } +// var ( +// scheme *runtime.Scheme = runtime.NewScheme() +// ) +// +// This also true of the built-in Kubernetes types. Then, in the entrypoint for +// your manager, assemble the scheme containing exactly the types you need. +// For instance, if our controller needs types from the core/v1 API group (e.g. Pod), +// plus types from my.api.group/v1: +// +// func init() { +// myapigroupv1.AddToScheme(scheme) +// kubernetesscheme.AddToScheme(scheme) +// } +// +// func main() { +// mgr := controllers.NewManager(controllers.GetConfigOrDie(), manager.Options{ +// Scheme: scheme, +// }) +// // ... +// } +// +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. 
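Besides the AddToScheme pattern shown in the package comment, the Builder also exposes Build, which returns a fresh Scheme containing only the registered types. A small sketch; the group, version, and Widget types are placeholders:

    import (
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"

        "sigs.k8s.io/controller-runtime/pkg/scheme"
    )

    var builder = &scheme.Builder{
        GroupVersion: schema.GroupVersion{Group: "example.dev", Version: "v1"},
    }

    // newScheme registers the placeholder types and builds a standalone Scheme.
    func newScheme() (*runtime.Scheme, error) {
        builder.Register(&Widget{}, &WidgetList{})
        return builder.Build()
    }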
+type Builder struct { + GroupVersion schema.GroupVersion + runtime.SchemeBuilder +} + +// Register adds one or objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +func (bld *Builder) Register(object ...runtime.Object) *Builder { + bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(bld.GroupVersion, object...) + metav1.AddToGroupVersion(scheme, bld.GroupVersion) + return nil + }) + return bld +} + +// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld. +func (bld *Builder) RegisterAll(b *Builder) *Builder { + bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...) + return bld +} + +// AddToScheme adds all registered types to s. +func (bld *Builder) AddToScheme(s *runtime.Scheme) error { + return bld.SchemeBuilder.AddToScheme(s) +} + +// Build returns a new Scheme containing the registered types. +func (bld *Builder) Build() (*runtime.Scheme, error) { + s := runtime.NewScheme() + return s, bld.AddToScheme(s) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go index 2e2ef2b36c..31935c83c1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ /* -Package source provides event streams provided to Controllers through Controller.Watch. Events are +Package source provides event streams to hook up to Controllers with Controller.Watch. Events are used with handler.EventHandlers to enqueue reconcile.Requests and trigger Reconciles for Kubernetes objects. */ diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go index 6ced9f4eff..8558e27365 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go @@ -25,13 +25,13 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.KBLog.WithName("source").WithName("EventHandler") +var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") var _ cache.ResourceEventHandler = EventHandler{} @@ -42,7 +42,7 @@ type EventHandler struct { Predicates []predicate.Predicate } -// OnAdd creates and CreateEvent and calls Create on EventHandler +// OnAdd creates CreateEvent and calls Create on EventHandler func (e EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} @@ -74,7 +74,7 @@ func (e EventHandler) OnAdd(obj interface{}) { e.EventHandler.Create(c, e.Queue) } -// OnUpdate creates and UpdateEvent and calls Update on EventHandler +// OnUpdate creates UpdateEvent and calls Update on EventHandler func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} @@ -124,7 +124,7 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { e.EventHandler.Update(u, e.Queue) } -// OnDelete creates and DeleteEvent and calls Delete on EventHandler +// OnDelete creates DeleteEvent and calls Delete on EventHandler func (e EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} diff --git 
a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index cd7bef798f..c777a8c719 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -25,16 +25,15 @@ import ( "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/source/internal" - toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.KBLog.WithName("source") +var log = logf.RuntimeLog.WithName("source") const ( // defaultBufferSize is the default number of event notifications that can be buffered. @@ -243,8 +242,8 @@ func (cs *Channel) syncLoop() { // Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) type Informer struct { - // Informer is the generated client-go Informer - Informer toolscache.SharedIndexInformer + // Informer is the controller-runtime Informer + Informer cache.Informer } var _ Source = &Informer{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index 1aea214cfd..9583b5e9ac 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -17,32 +17,60 @@ limitations under the License. package admission import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" + "k8s.io/apimachinery/pkg/util/json" ) -// DecodeFunc is a function that implements the Decoder interface. -type DecodeFunc func(types.Request, runtime.Object) error - -var _ types.Decoder = DecodeFunc(nil) - -// Decode implements the Decoder interface. -func (f DecodeFunc) Decode(req types.Request, obj runtime.Object) error { - return f(req, obj) -} - -type decoder struct { +// Decoder knows how to decode the contents of an admission +// request into a concrete object. +type Decoder struct { codecs serializer.CodecFactory } // NewDecoder creates a Decoder given the runtime.Scheme -func NewDecoder(scheme *runtime.Scheme) (types.Decoder, error) { - return decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil } // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. -func (d decoder) Decode(req types.Request, into runtime.Object) error { +// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. +// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. +func (d *Decoder) Decode(req Request, into runtime.Object) error { + // we error out if rawObj is an empty object. + if len(req.Object.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + return d.DecodeRaw(req.Object, into) +} + +// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. +// It errors out if rawObj is empty i.e. containing 0 raw bytes. 
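A sketch of a hand-written handler using the Decoder above; podValidator is a placeholder, not part of this patch. Decode reads req.Object, while DecodeRaw is the way to get at req.OldObject on update requests:

    import (
        "context"
        "net/http"

        corev1 "k8s.io/api/core/v1"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    type podValidator struct {
        decoder *admission.Decoder
    }

    // InjectDecoder satisfies admission.DecoderInjector (defined further below),
    // so the surrounding webhook hands us a Decoder built from the manager's scheme.
    func (v *podValidator) InjectDecoder(d *admission.Decoder) error {
        v.decoder = d
        return nil
    }

    func (v *podValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
        pod := &corev1.Pod{}
        if err := v.decoder.Decode(req, pod); err != nil {
            return admission.Errored(http.StatusBadRequest, err)
        }
        // On updates, the previous version of the object is only available via DecodeRaw.
        oldPod := &corev1.Pod{}
        if len(req.OldObject.Raw) > 0 {
            if err := v.decoder.DecodeRaw(req.OldObject, oldPod); err != nil {
                return admission.Errored(http.StatusBadRequest, err)
            }
        }
        return admission.Allowed("")
    }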
+func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { + // NB(directxman12): there's a bug/weird interaction between decoders and + // the API server where the API server doesn't send a GVK on the embedded + // objects, which means the unstructured decoder refuses to decode. It + // also means we can't pass the unstructured directly in, since it'll try + // and call unstructured's special Unmarshal implementation, which calls + // back into that same decoder :-/ + // See kubernetes/kubernetes#74373. + + // we error out if rawObj is an empty object. + if len(rawObj.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + // unmarshal into unstructured's underlying object to avoid calling the decoder + if err := json.Unmarshal(rawObj.Raw, &unstructuredInto.Object); err != nil { + return err + } + + return nil + } + deserializer := d.codecs.UniversalDeserializer() - return runtime.DecodeInto(deserializer, req.AdmissionRequest.Object.Raw, into) + return runtime.DecodeInto(deserializer, rawObj.Raw, into) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go new file mode 100644 index 0000000000..8b255894ba --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "net/http" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Defaulter defines functions for setting defaults on resources +type Defaulter interface { + runtime.Object + Default() +} + +// DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. +func DefaultingWebhookFor(defaulter Defaulter) *Webhook { + return &Webhook{ + Handler: &mutatingHandler{defaulter: defaulter}, + } +} + +type mutatingHandler struct { + defaulter Defaulter + decoder *Decoder +} + +var _ DecoderInjector = &mutatingHandler{} + +// InjectDecoder injects the decoder into a mutatingHandler. +func (h *mutatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. 
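The Defaulter contract above only asks the API type for a Default method; DefaultingWebhookFor then wraps it in the mutating handler that follows. A sketch assuming Widget is a CRD type whose DeepCopyObject is generated elsewhere:

    // Default fills in optional fields; the mutating webhook calls it for every
    // admitted Widget and returns the resulting diff as a JSON patch.
    func (w *Widget) Default() {
        if w.Spec.Replicas == nil {
            one := int32(1)
            w.Spec.Replicas = &one
        }
    }

    // Building the webhook is then a one-liner:
    //     hook := admission.DefaultingWebhookFor(&Widget{})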
+func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + + // Get the object in the request + obj := h.defaulter.DeepCopyObject().(Defaulter) + err := h.decoder.Decode(req, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + obj.Default() + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + + // Create the patch + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go index 44a2b15294..0b274dd02b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -17,85 +17,12 @@ limitations under the License. /* Package admission provides implementation for admission webhook and methods to implement admission webhook handlers. -The following snippet is an example implementation of mutating handler. - - type Mutator struct { - client client.Client - decoder types.Decoder - } - - func (m *Mutator) mutatePodsFn(ctx context.Context, pod *corev1.Pod) error { - // your logic to mutate the passed-in pod. - } - - func (m *Mutator) Handle(ctx context.Context, req types.Request) types.Response { - pod := &corev1.Pod{} - err := m.decoder.Decode(req, pod) - if err != nil { - return admission.ErrorResponse(http.StatusBadRequest, err) - } - // Do deepcopy before actually mutate the object. - copy := pod.DeepCopy() - err = m.mutatePodsFn(ctx, copy) - if err != nil { - return admission.ErrorResponse(http.StatusInternalServerError, err) - } - return admission.PatchResponse(pod, copy) - } - - // InjectClient is called by the Manager and provides a client.Client to the Mutator instance. - func (m *Mutator) InjectClient(c client.Client) error { - h.client = c - return nil - } - - // InjectDecoder is called by the Manager and provides a types.Decoder to the Mutator instance. - func (m *Mutator) InjectDecoder(d types.Decoder) error { - h.decoder = d - return nil - } - -The following snippet is an example implementation of validating handler. - - type Handler struct { - client client.Client - decoder types.Decoder - } - - func (v *Validator) validatePodsFn(ctx context.Context, pod *corev1.Pod) (bool, string, error) { - // your business logic - } - - func (v *Validator) Handle(ctx context.Context, req types.Request) types.Response { - pod := &corev1.Pod{} - err := h.decoder.Decode(req, pod) - if err != nil { - return admission.ErrorResponse(http.StatusBadRequest, err) - } - - allowed, reason, err := h.validatePodsFn(ctx, pod) - if err != nil { - return admission.ErrorResponse(http.StatusInternalServerError, err) - } - return admission.ValidationResponse(allowed, reason) - } - - // InjectClient is called by the Manager and provides a client.Client to the Validator instance. - func (v *Validator) InjectClient(c client.Client) error { - h.client = c - return nil - } - - // InjectDecoder is called by the Manager and provides a types.Decoder to the Validator instance. - func (v *Validator) InjectDecoder(d types.Decoder) error { - h.decoder = d - return nil - } +See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. 
*/ package admission import ( - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.KBLog.WithName("admission") +var log = logf.RuntimeLog.WithName("admission") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 5b91ad2b50..5e4e2d699b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -17,56 +17,45 @@ limitations under the License. package admission import ( - "context" "encoding/json" "errors" "fmt" "io" "io/ioutil" "net/http" - "time" "k8s.io/api/admission/v1beta1" admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" - "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) -var admissionv1beta1scheme = runtime.NewScheme() -var admissionv1beta1schemecodecs = serializer.NewCodecFactory(admissionv1beta1scheme) +var admissionScheme = runtime.NewScheme() +var admissionCodecs = serializer.NewCodecFactory(admissionScheme) func init() { - addToScheme(admissionv1beta1scheme) -} - -func addToScheme(scheme *runtime.Scheme) { - utilruntime.Must(admissionv1beta1.AddToScheme(scheme)) + utilruntime.Must(admissionv1beta1.AddToScheme(admissionScheme)) } var _ http.Handler = &Webhook{} func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { - startTS := time.Now() - defer metrics.RequestLatency.WithLabelValues(wh.Name).Observe(time.Now().Sub(startTS).Seconds()) - var body []byte var err error - var reviewResponse types.Response + var reviewResponse Response if r.Body != nil { if body, err = ioutil.ReadAll(r.Body); err != nil { - log.Error(err, "unable to read the body from the incoming request") - reviewResponse = ErrorResponse(http.StatusBadRequest, err) + wh.log.Error(err, "unable to read the body from the incoming request") + reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } } else { err = errors.New("request body is empty") - log.Error(err, "bad request") - reviewResponse = ErrorResponse(http.StatusBadRequest, err) + wh.log.Error(err, "bad request") + reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } @@ -74,42 +63,38 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { // verify the content type is accurate contentType := r.Header.Get("Content-Type") if contentType != "application/json" { - err = fmt.Errorf("contentType=%s, expect application/json", contentType) - log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) - reviewResponse = ErrorResponse(http.StatusBadRequest, err) + err = fmt.Errorf("contentType=%s, expected application/json", contentType) + wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } - ar := v1beta1.AdmissionReview{} - if _, _, err := admissionv1beta1schemecodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil { - log.Error(err, "unable to decode the request") - reviewResponse = ErrorResponse(http.StatusBadRequest, err) + req := Request{} + ar := 
v1beta1.AdmissionReview{ + // avoid an extra copy + Request: &req.AdmissionRequest, + } + if _, _, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil { + wh.log.Error(err, "unable to decode the request") + reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } // TODO: add panic-recovery for Handle - reviewResponse = wh.Handle(context.Background(), types.Request{AdmissionRequest: ar.Request}) + reviewResponse = wh.Handle(r.Context(), req) wh.writeResponse(w, reviewResponse) } -func (wh *Webhook) writeResponse(w io.Writer, response types.Response) { - if response.Response.Result.Code != 0 { - if response.Response.Result.Code == http.StatusOK { - metrics.TotalRequests.WithLabelValues(wh.Name, "true").Inc() - } else { - metrics.TotalRequests.WithLabelValues(wh.Name, "false").Inc() - } - } - +func (wh *Webhook) writeResponse(w io.Writer, response Response) { encoder := json.NewEncoder(w) responseAdmissionReview := v1beta1.AdmissionReview{ - Response: response.Response, + Response: &response.AdmissionResponse, } err := encoder.Encode(responseAdmissionReview) if err != nil { - log.Error(err, "unable to encode the response") - wh.writeResponse(w, ErrorResponse(http.StatusInternalServerError, err)) + wh.log.Error(err, "unable to encode the response") + wh.writeResponse(w, Errored(http.StatusInternalServerError, err)) } } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go similarity index 50% rename from vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go index b004081036..d5af0d598f 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/tags.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package generators +package admission -import ( - "k8s.io/gengo/types" -) +// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. +type DecoderInjector interface { + InjectDecoder(*Decoder) error +} -// extractTag gets the comment-tags for the key. If the tag did not exist, it -// returns the empty string. -func extractTag(key string, lines []string) string { - val, present := types.ExtractCommentTags("+", lines)[key] - if !present || len(val) < 1 { - return "" +// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns +// false if i does not implement Decoder. +func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { + if s, ok := i.(DecoderInjector); ok { + return true, s.InjectDecoder(decoder) } - - return val[0] + return false, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go new file mode 100644 index 0000000000..2ffdb57b00 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "gomodules.xyz/jsonpatch/v2" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type multiMutating []Handler + +func (hs multiMutating) Handle(ctx context.Context, req Request) Response { + patches := []jsonpatch.JsonPatchOperation{} + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + if resp.PatchType != nil && *resp.PatchType != admissionv1beta1.PatchTypeJSONPatch { + return Errored(http.StatusInternalServerError, + fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", + resp.PatchType, admissionv1beta1.PatchTypeJSONPatch)) + } + patches = append(patches, resp.Patches...) + } + var err error + marshaledPatch, err := json.Marshal(patches) + if err != nil { + return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %v", err)) + } + return Response{ + AdmissionResponse: admissionv1beta1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Patch: marshaledPatch, + PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + }, + } +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiMutating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// MultiMutatingHandler combines multiple mutating webhook handlers into a single +// mutating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. Users must take care to +// ensure patches are disjoint. +func MultiMutatingHandler(handlers ...Handler) Handler { + return multiMutating(handlers) +} + +type multiValidating []Handler + +func (hs multiValidating) Handle(ctx context.Context, req Request) Response { + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + } + return Response{ + AdmissionResponse: admissionv1beta1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + } +} + +// MultiValidatingHandler combines multiple validating webhook handlers into a single +// validating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. +func MultiValidatingHandler(handlers ...Handler) Handler { + return multiValidating(handlers) +} + +// InjectFunc injects the field setter into the handlers. 
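A sketch of combining handlers with the constructors above; the handler types are placeholders, each must implement admission.Handler, and for the mutating case their patches must not overlap since the first disallowed response short-circuits the rest:

    mutating := &admission.Webhook{
        Handler: admission.MultiMutatingHandler(
            &setDefaultLabelsHandler{},
            &setDefaultResourcesHandler{},
        ),
    }

    validating := &admission.Webhook{
        Handler: admission.MultiValidatingHandler(
            &quotaHandler{},
            &namingPolicyHandler{},
        ),
    }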
+func (hs multiValidating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index bf7c903baa..da8e336788 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -19,17 +19,38 @@ package admission import ( "net/http" + "gomodules.xyz/jsonpatch/v2" + admissionv1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/patch" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" ) -// ErrorResponse creates a new Response for error-handling a request. -func ErrorResponse(code int32, err error) types.Response { - return types.Response{ - Response: &admissionv1beta1.AdmissionResponse{ +// Allowed constructs a response indicating that the given operation +// is allowed (without any patches). +func Allowed(reason string) Response { + return ValidationResponse(true, reason) +} + +// Denied constructs a response indicating that the given operation +// is not allowed. +func Denied(reason string) Response { + return ValidationResponse(false, reason) +} + +// Patched constructs a response indicating that the given operation is +// allowed, and that the target object should be modified by the given +// JSONPatch operations. +func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(reason) + resp.Patches = patches + + return resp +} + +// Errored creates a new Response for error-handling a request. +func Errored(code int32, err error) Response { + return Response{ + AdmissionResponse: admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Code: code, @@ -40,29 +61,36 @@ func ErrorResponse(code int32, err error) types.Response { } // ValidationResponse returns a response for admitting a request. -func ValidationResponse(allowed bool, reason string) types.Response { - resp := types.Response{ - Response: &admissionv1beta1.AdmissionResponse{ +func ValidationResponse(allowed bool, reason string) Response { + code := http.StatusForbidden + if allowed { + code = http.StatusOK + } + resp := Response{ + AdmissionResponse: admissionv1beta1.AdmissionResponse{ Allowed: allowed, + Result: &metav1.Status{ + Code: int32(code), + }, }, } if len(reason) > 0 { - resp.Response.Result = &metav1.Status{ - Reason: metav1.StatusReason(reason), - } + resp.Result.Reason = metav1.StatusReason(reason) } return resp } -// PatchResponse returns a new response with json patch. -func PatchResponse(original, current runtime.Object) types.Response { - patches, err := patch.NewJSONPatch(original, current) +// PatchResponseFromRaw takes 2 byte arrays and returns a new response with json patch. +// The original object should be passed in as raw bytes to avoid the roundtripping problem +// described in https://github.com/kubernetes-sigs/kubebuilder/issues/510. 
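For reference, a sketch of how the constructors above are typically used inside a Handle method; exampleHandler and the patch path are placeholders:

    import (
        "context"

        "gomodules.xyz/jsonpatch/v2"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    func (h *exampleHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
        if req.Namespace == "kube-system" {
            return admission.Denied("not allowed in kube-system")
        }
        // Mutating handlers can return explicit JSON patch operations...
        return admission.Patched("added default owner label",
            jsonpatch.JsonPatchOperation{Operation: "add", Path: "/metadata/labels/owner", Value: "example"},
        )
        // ...or compute the patch by diffing raw bytes, as the defaulting handler
        // above does with PatchResponseFromRaw(req.Object.Raw, marshalledObject).
    }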
+func PatchResponseFromRaw(original, current []byte) Response { + patches, err := jsonpatch.CreatePatch(original, current) if err != nil { - return ErrorResponse(http.StatusInternalServerError, err) + return Errored(http.StatusInternalServerError, err) } - return types.Response{ + return Response{ Patches: patches, - Response: &admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1beta1.AdmissionResponse{ Allowed: true, PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), }, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/types/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/types/types.go deleted file mode 100644 index 75236779ff..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/types/types.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "github.com/appscode/jsonpatch" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" - "k8s.io/apimachinery/pkg/runtime" -) - -// Request is the input of Handler -type Request struct { - AdmissionRequest *admissionv1beta1.AdmissionRequest -} - -// Response is the output of admission.Handler -type Response struct { - // Patches are the JSON patches for mutating webhooks. - // Using this instead of setting Response.Patch to minimize the overhead of serialization and deserialization. - Patches []jsonpatch.JsonPatchOperation - // Response is the admission response. Don't set the Patch field in it. - Response *admissionv1beta1.AdmissionResponse -} - -// Decoder is used to decode AdmissionRequest. -type Decoder interface { - // Decode decodes the raw byte object from the AdmissionRequest to the passed-in runtime.Object. - Decode(Request, runtime.Object) error -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go new file mode 100644 index 0000000000..6defcfe92f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -0,0 +1,108 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package admission + +import ( + "context" + "net/http" + + "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Validator defines functions for validating an operation +type Validator interface { + runtime.Object + ValidateCreate() error + ValidateUpdate(old runtime.Object) error + ValidateDelete() error +} + +// ValidatingWebhookFor creates a new Webhook for validating the provided type. +func ValidatingWebhookFor(validator Validator) *Webhook { + return &Webhook{ + Handler: &validatingHandler{validator: validator}, + } +} + +type validatingHandler struct { + validator Validator + decoder *Decoder +} + +var _ DecoderInjector = &validatingHandler{} + +// InjectDecoder injects the decoder into a validatingHandler. +func (h *validatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + + // Get the object in the request + obj := h.validator.DeepCopyObject().(Validator) + if req.Operation == v1beta1.Create { + err := h.decoder.Decode(req, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateCreate() + if err != nil { + return Denied(err.Error()) + } + } + + if req.Operation == v1beta1.Update { + oldObj := obj.DeepCopyObject() + + err := h.decoder.DecodeRaw(req.Object, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + err = h.decoder.DecodeRaw(req.OldObject, oldObj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateUpdate(oldObj) + if err != nil { + return Denied(err.Error()) + } + } + + if req.Operation == v1beta1.Delete { + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + err := h.decoder.DecodeRaw(req.OldObject, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateDelete() + if err != nil { + return Denied(err.Error()) + } + } + + return Allowed("") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index 6a14dd6350..8f3430efbf 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -18,242 +18,183 @@ package admission import ( "context" - "encoding/json" "errors" - "fmt" "net/http" - "regexp" - "strings" - "sync" - - "github.com/appscode/jsonpatch" + "github.com/go-logr/logr" + "gomodules.xyz/jsonpatch/v2" admissionv1beta1 "k8s.io/api/admission/v1beta1" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - atypes "sigs.k8s.io/controller-runtime/pkg/webhook/admission/types" - "sigs.k8s.io/controller-runtime/pkg/webhook/types" ) +var ( + errUnableToEncodeResponse = errors.New("unable to encode response") +) + +// Request defines the input for an admission handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. 
Get, Create, etc), and the object itself. +type Request struct { + admissionv1beta1.AdmissionRequest +} + +// Response is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type Response struct { + // Patches are the JSON patches for mutating webhooks. + // Using this instead of setting Response.Patch to minimize + // overhead of serialization and deserialization. + // Patches set here will override any patches in the response, + // so leave this empty if you want to set the patch response directly. + Patches []jsonpatch.JsonPatchOperation + // AdmissionResponse is the raw admission response. + // The Patch field in it will be overwritten by the listed patches. + admissionv1beta1.AdmissionResponse +} + +// Complete populates any fields that are yet to be set in +// the underlying AdmissionResponse, It mutates the response. +func (r *Response) Complete(req Request) error { + r.UID = req.UID + + // ensure that we have a valid status code + if r.Result == nil { + r.Result = &metav1.Status{} + } + if r.Result.Code == 0 { + r.Result.Code = http.StatusOK + } + // TODO(directxman12): do we need to populate this further, and/or + // is code actually necessary (the same webhook doesn't use it) + + if len(r.Patches) == 0 { + return nil + } + + var err error + r.Patch, err = json.Marshal(r.Patches) + if err != nil { + return err + } + patchType := admissionv1beta1.PatchTypeJSONPatch + r.PatchType = &patchType + + return nil +} + // Handler can handle an AdmissionRequest. type Handler interface { - Handle(context.Context, atypes.Request) atypes.Response + // Handle yields a response to an AdmissionRequest. + // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. + Handle(context.Context, Request) Response } // HandlerFunc implements Handler interface using a single function. -type HandlerFunc func(context.Context, atypes.Request) atypes.Response +type HandlerFunc func(context.Context, Request) Response var _ Handler = HandlerFunc(nil) // Handle process the AdmissionRequest by invoking the underlying function. -func (f HandlerFunc) Handle(ctx context.Context, req atypes.Request) atypes.Response { +func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { return f(ctx, req) } // Webhook represents each individual webhook. type Webhook struct { - // Name is the name of the webhook - Name string - // Type is the webhook type, i.e. mutating, validating - Type types.WebhookType - // Path is the path this webhook will serve. - Path string - // Rules maps to the Rules field in admissionregistrationv1beta1.Webhook - Rules []admissionregistrationv1beta1.RuleWithOperations - // FailurePolicy maps to the FailurePolicy field in admissionregistrationv1beta1.Webhook - // This optional. If not set, will be defaulted to Ignore (fail-open) by the server. - // More details: https://github.com/kubernetes/api/blob/f5c295feaba2cbc946f0bbb8b535fc5f6a0345ee/admissionregistration/v1beta1/types.go#L144-L147 - FailurePolicy *admissionregistrationv1beta1.FailurePolicyType - // NamespaceSelector maps to the NamespaceSelector field in admissionregistrationv1beta1.Webhook - // This optional. - NamespaceSelector *metav1.LabelSelector - // Handlers contains a list of handlers. 
Each handler may only contains the business logic for its own feature. - // For example, feature foo and bar can be in the same webhook if all the other configurations are the same. - // The handler will be invoked sequentially as the order in the list. - // Note: if you are using mutating webhook with multiple handlers, it's your responsibility to - // ensure the handlers are not generating conflicting JSON patches. - Handlers []Handler - - once sync.Once -} + // Handler actually processes an admission request returning whether it was allowed or denied, + // and potentially patches to apply to the handler. + Handler Handler -func (w *Webhook) setDefaults() { - if len(w.Path) == 0 { - if len(w.Rules) == 0 || len(w.Rules[0].Resources) == 0 { - // can't do defaulting, skip it. - return - } - if w.Type == types.WebhookTypeMutating { - w.Path = "/mutate-" + w.Rules[0].Resources[0] - } else if w.Type == types.WebhookTypeValidating { - w.Path = "/validate-" + w.Rules[0].Resources[0] - } - } - if len(w.Name) == 0 { - reg := regexp.MustCompile("[^a-zA-Z0-9]+") - processedPath := strings.ToLower(reg.ReplaceAllString(w.Path, "")) - w.Name = processedPath + ".example.com" - } -} + // decoder is constructed on receiving a scheme and passed down to then handler + decoder *Decoder -// Add adds additional handler(s) in the webhook -func (w *Webhook) Add(handlers ...Handler) { - w.Handlers = append(w.Handlers, handlers...) + log logr.Logger } -// Webhook implements Handler interface. -var _ Handler = &Webhook{} +// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. +func (w *Webhook) InjectLogger(l logr.Logger) error { + w.log = l + return nil +} // Handle processes AdmissionRequest. // If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches. // If the webhook is validating type, it delegates the AdmissionRequest to each handler and // deny the request if anyone denies. -func (w *Webhook) Handle(ctx context.Context, req atypes.Request) atypes.Response { - if req.AdmissionRequest == nil { - return ErrorResponse(http.StatusBadRequest, errors.New("got an empty AdmissionRequest")) +func (w *Webhook) Handle(ctx context.Context, req Request) Response { + resp := w.Handler.Handle(ctx, req) + if err := resp.Complete(req); err != nil { + w.log.Error(err, "unable to encode response") + return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) } - var resp atypes.Response - switch w.Type { - case types.WebhookTypeMutating: - resp = w.handleMutating(ctx, req) - case types.WebhookTypeValidating: - resp = w.handleValidating(ctx, req) - default: - return ErrorResponse(http.StatusInternalServerError, errors.New("you must specify your webhook type")) - } - resp.Response.UID = req.AdmissionRequest.UID + return resp } -func (w *Webhook) handleMutating(ctx context.Context, req atypes.Request) atypes.Response { - patches := []jsonpatch.JsonPatchOperation{} - for _, handler := range w.Handlers { - resp := handler.Handle(ctx, req) - if !resp.Response.Allowed { - setStatusOKInAdmissionResponse(resp.Response) - return resp - } - if resp.Response.PatchType != nil && *resp.Response.PatchType != admissionv1beta1.PatchTypeJSONPatch { - return ErrorResponse(http.StatusInternalServerError, - fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", - resp.Response.PatchType, admissionv1beta1.PatchTypeJSONPatch)) - } - patches = append(patches, resp.Patches...) 
- } +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. +func (w *Webhook) InjectScheme(s *runtime.Scheme) error { + // TODO(directxman12): we should have a better way to pass this down + var err error - marshaledPatch, err := json.Marshal(patches) + w.decoder, err = NewDecoder(s) if err != nil { - return ErrorResponse(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %v", err)) + return err } - return atypes.Response{ - Response: &admissionv1beta1.AdmissionResponse{ - Allowed: true, - Result: &metav1.Status{ - Code: http.StatusOK, - }, - Patch: marshaledPatch, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), - }, - } -} -func (w *Webhook) handleValidating(ctx context.Context, req atypes.Request) atypes.Response { - for _, handler := range w.Handlers { - resp := handler.Handle(ctx, req) - if !resp.Response.Allowed { - setStatusOKInAdmissionResponse(resp.Response) - return resp + // inject the decoder here too, just in case the order of calling this is not + // scheme first, then inject func + if w.Handler != nil { + if _, err := InjectDecoderInto(w.GetDecoder(), w.Handler); err != nil { + return err } } - return atypes.Response{ - Response: &admissionv1beta1.AdmissionResponse{ - Allowed: true, - Result: &metav1.Status{ - Code: http.StatusOK, - }, - }, - } -} - -func setStatusOKInAdmissionResponse(resp *admissionv1beta1.AdmissionResponse) { - if resp == nil { - return - } - if resp.Result == nil { - resp.Result = &metav1.Status{} - } - if resp.Result.Code == 0 { - resp.Result.Code = http.StatusOK - } -} - -// GetName returns the name of the webhook. -func (w *Webhook) GetName() string { - w.once.Do(w.setDefaults) - return w.Name -} - -// GetPath returns the path that the webhook registered. -func (w *Webhook) GetPath() string { - w.once.Do(w.setDefaults) - return w.Path -} -// GetType returns the type of the webhook. -func (w *Webhook) GetType() types.WebhookType { - w.once.Do(w.setDefaults) - return w.Type + return nil } -// Handler returns a http.Handler for the webhook -func (w *Webhook) Handler() http.Handler { - w.once.Do(w.setDefaults) - return w +// GetDecoder returns a decoder to decode the objects embedded in admission requests. +// It may be nil if we haven't received a scheme to use to determine object types yet. +func (w *Webhook) GetDecoder() *Decoder { + return w.decoder } -// Validate validates if the webhook is valid. -func (w *Webhook) Validate() error { - w.once.Do(w.setDefaults) - if len(w.Rules) == 0 { - return errors.New("field Rules should not be empty") - } - if len(w.Name) == 0 { - return errors.New("field Name should not be empty") - } - if w.Type != types.WebhookTypeMutating && w.Type != types.WebhookTypeValidating { - return fmt.Errorf("unsupported Type: %v, only WebhookTypeMutating and WebhookTypeValidating are supported", w.Type) - } - if len(w.Path) == 0 { - return errors.New("field Path should not be empty") - } - if len(w.Handlers) == 0 { - return errors.New("field Handler should not be empty") - } - return nil -} +// InjectFunc injects the field setter into the webhook. +func (w *Webhook) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. 
-var _ inject.Client = &Webhook{} + // also inject a decoder, and wrap this so that we get a setFields + // that injects a decoder (hopefully things don't ignore the duplicate + // InjectorInto call). -// InjectClient injects the client into the handlers -func (w *Webhook) InjectClient(c client.Client) error { - for _, handler := range w.Handlers { - if _, err := inject.ClientInto(c, handler); err != nil { + var setFields inject.Func + setFields = func(target interface{}) error { + if err := f(target); err != nil { return err } - } - return nil -} -var _ inject.Decoder = &Webhook{} + if _, err := inject.InjectorInto(setFields, target); err != nil { + return err + } -// InjectDecoder injects the decoder into the handlers -func (w *Webhook) InjectDecoder(d atypes.Decoder) error { - for _, handler := range w.Handlers { - if _, err := inject.DecoderInto(d, handler); err != nil { + if _, err := InjectDecoderInto(w.GetDecoder(), target); err != nil { return err } + + return nil } - return nil + + return setFields(w.Handler) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go new file mode 100644 index 0000000000..276784efb2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "gomodules.xyz/jsonpatch/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// define some aliases for common bits of the webhook functionality + +// Defaulter defines functions for setting defaults on resources +type Defaulter = admission.Defaulter + +// Validator defines functions for validating an operation +type Validator = admission.Validator + +// AdmissionRequest defines the input for an admission handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type AdmissionRequest = admission.Request + +// AdmissionResponse is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type AdmissionResponse = admission.Response + +// Admission is webhook suitable for registration with the server +// an admission webhook that validates API operations and potentially +// mutates their contents. +type Admission = admission.Webhook + +// AdmissionHandler knows how to process admission requests, validating them, +// and potentially mutating the objects they contain. +type AdmissionHandler = admission.Handler + +// AdmissionDecoder knows how to decode objects from admission requests. +type AdmissionDecoder = admission.Decoder + +// JSONPatchOp represents a single JSONPatch patch operation. 
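A sketch of how webhooks built this way might be served. It assumes the manager in this controller-runtime version exposes GetWebhookServer, and that Widget is a placeholder CRD type implementing webhook.Defaulter and webhook.Validator (the aliases above):

    import (
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    func setupWebhooks(mgr manager.Manager) {
        srv := mgr.GetWebhookServer()
        // Each *admission.Webhook is an http.Handler; the manager's field
        // injection supplies the scheme, from which the webhook builds its
        // Decoder (see InjectScheme above).
        srv.Register("/mutate-widget", admission.DefaultingWebhookFor(&Widget{}))
        srv.Register("/validate-widget", admission.ValidatingWebhookFor(&Widget{}))
    }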
+type JSONPatchOp = jsonpatch.Operation + +var ( + // Allowed indicates that the admission request should be allowed for the given reason. + Allowed = admission.Allowed + + // Denied indicates that the admission request should be denied for the given reason. + Denied = admission.Denied + + // Patched indicates that the admission request should be allowed for the given reason, + // and that the contained object should be mutated using the given patches. + Patched = admission.Patched + + // Errored indicates that an error occurred in the admission request. + Errored = admission.Errored +) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go new file mode 100644 index 0000000000..05760cbbc4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go @@ -0,0 +1,349 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides implementation for CRD conversion webhook that implements handler for version conversion requests for types that are convertible. + +See pkg/conversion for interface definitions required to ensure an API Type is convertible. +*/ +package conversion + +import ( + "encoding/json" + "fmt" + "net/http" + + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + log = logf.Log.WithName("conversion-webhook") +) + +// Webhook implements a CRD conversion webhook HTTP handler. +type Webhook struct { + scheme *runtime.Scheme + decoder *Decoder +} + +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. +func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + var err error + wh.scheme = s + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + return nil +} + +// ensure Webhook implements http.Handler +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + convertReview := &apix.ConversionReview{} + err := json.NewDecoder(r.Body).Decode(convertReview) + if err != nil { + log.Error(err, "failed to read conversion request") + w.WriteHeader(http.StatusBadRequest) + return + } + + // TODO(droot): may be move the conversion logic to a separate module to + // decouple it from the http layer ? 
+ resp, err := wh.handleConvertRequest(convertReview.Request) + if err != nil { + log.Error(err, "failed to convert", "request", convertReview.Request.UID) + convertReview.Response = errored(err) + } else { + convertReview.Response = resp + } + convertReview.Response.UID = convertReview.Request.UID + convertReview.Request = nil + + err = json.NewEncoder(w).Encode(convertReview) + if err != nil { + log.Error(err, "failed to write response") + return + } +} + +// handles a version conversion request. +func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { + if req == nil { + return nil, fmt.Errorf("conversion request is nil") + } + var objects []runtime.RawExtension + + for _, obj := range req.Objects { + src, gvk, err := wh.decoder.Decode(obj.Raw) + if err != nil { + return nil, err + } + dst, err := wh.allocateDstObject(req.DesiredAPIVersion, gvk.Kind) + if err != nil { + return nil, err + } + err = wh.convertObject(src, dst) + if err != nil { + return nil, err + } + objects = append(objects, runtime.RawExtension{Object: dst}) + } + return &apix.ConversionResponse{ + UID: req.UID, + ConvertedObjects: objects, + Result: metav1.Status{ + Status: metav1.StatusSuccess, + }, + }, nil +} + +// convertObject will convert given a src object to dst object. +// Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 +// without compromising readability, so disabling gocyclo linter +func (wh *Webhook) convertObject(src, dst runtime.Object) error { + srcGVK := src.GetObjectKind().GroupVersionKind() + dstGVK := dst.GetObjectKind().GroupVersionKind() + + if srcGVK.GroupKind() != dstGVK.GroupKind() { + return fmt.Errorf("src %T and dst %T does not belong to same API Group", src, dst) + } + + if srcGVK == dstGVK { + return fmt.Errorf("conversion is not allowed between same type %T", src) + } + + srcIsHub, dstIsHub := isHub(src), isHub(dst) + srcIsConvertible, dstIsConvertible := isConvertible(src), isConvertible(dst) + + switch { + case srcIsHub && dstIsConvertible: + return dst.(conversion.Convertible).ConvertFrom(src.(conversion.Hub)) + case dstIsHub && srcIsConvertible: + return src.(conversion.Convertible).ConvertTo(dst.(conversion.Hub)) + case srcIsConvertible && dstIsConvertible: + return wh.convertViaHub(src.(conversion.Convertible), dst.(conversion.Convertible)) + default: + return fmt.Errorf("%T is not convertible to %T", src, dst) + } +} + +func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { + hub, err := wh.getHub(src) + if err != nil { + return err + } + + if hub == nil { + return fmt.Errorf("%s does not have any Hub defined", src) + } + + err = src.ConvertTo(hub) + if err != nil { + return fmt.Errorf("%T failed to convert to hub version %T : %v", src, hub, err) + } + + err = dst.ConvertFrom(hub) + if err != nil { + return fmt.Errorf("%T failed to convert from hub version %T : %v", dst, hub, err) + } + + return nil +} + +// getHub returns an instance of the Hub for passed-in object's group/kind. 
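getHub, whose comment closes the block above, resolves the hub version that convertObject and convertViaHub route through. The contract those helpers rely on is the Hub/Convertible pair from pkg/conversion; a sketch with illustrative WidgetV1 (hub) and WidgetV1alpha1 (spoke) types, which are not part of this repository:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// WidgetV1 stands in for the storage ("hub") version, WidgetV1alpha1 for a spoke.
type WidgetV1 struct {
	metav1.TypeMeta
	Replicas int32
}

type WidgetV1alpha1 struct {
	metav1.TypeMeta
	Count int32
}

// Minimal deep copies so both types satisfy runtime.Object.
func (in *WidgetV1) DeepCopyObject() runtime.Object       { out := *in; return &out }
func (in *WidgetV1alpha1) DeepCopyObject() runtime.Object { out := *in; return &out }

// Hub marks WidgetV1 as the version every spoke converts through.
func (*WidgetV1) Hub() {}

// ConvertTo converts this spoke into the hub version.
func (src *WidgetV1alpha1) ConvertTo(dst conversion.Hub) error {
	dst.(*WidgetV1).Replicas = src.Count
	return nil
}

// ConvertFrom populates this spoke from the hub version.
func (dst *WidgetV1alpha1) ConvertFrom(src conversion.Hub) error {
	dst.Count = src.(*WidgetV1).Replicas
	return nil
}

// Compile-time checks mirroring the type assertions in convertObject above.
var (
	_ conversion.Hub         = &WidgetV1{}
	_ conversion.Convertible = &WidgetV1alpha1{}
)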
+func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { + gvks, err := objectGVKs(wh.scheme, obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + var hub conversion.Hub + var hubFoundAlready bool + for _, gvk := range gvks { + instance, err := wh.scheme.New(gvk) + if err != nil { + return nil, fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) + } + if val, isHub := instance.(conversion.Hub); isHub { + if hubFoundAlready { + return nil, fmt.Errorf("multiple hub version defined for %T", obj) + } + hubFoundAlready = true + hub = val + } + } + return hub, nil +} + +// allocateDstObject returns an instance for a given GVK. +func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { + gvk := schema.FromAPIVersionAndKind(apiVersion, kind) + + obj, err := wh.scheme.New(gvk) + if err != nil { + return obj, err + } + + t, err := meta.TypeAccessor(obj) + if err != nil { + return obj, err + } + + t.SetAPIVersion(apiVersion) + t.SetKind(kind) + + return obj, nil +} + +// IsConvertible determines if given type is convertible or not. For a type +// to be convertible, the group-kind needs to have a Hub type defined and all +// non-hub types must be able to convert to/from Hub. +func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { + var hubs, spokes, nonSpokes []runtime.Object + + gvks, err := objectGVKs(scheme, obj) + if err != nil { + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + for _, gvk := range gvks { + instance, err := scheme.New(gvk) + if err != nil { + return false, fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) + } + + if isHub(instance) { + hubs = append(hubs, instance) + continue + } + + if !isConvertible(instance) { + nonSpokes = append(nonSpokes, instance) + continue + } + + spokes = append(spokes, instance) + } + + if len(gvks) == 1 { + return false, nil // single version + } + + if len(hubs) == 0 && len(spokes) == 0 { + // multiple version detected with no conversion implementation. This is + // true for multi-version built-in types. + return false, nil + } + + if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible + spokeVersions := []string{} + for _, sp := range spokes { + spokeVersions = append(spokeVersions, sp.GetObjectKind().GroupVersionKind().String()) + } + return true, nil + } + + return false, PartialImplementationError{ + hubs: hubs, + nonSpokes: nonSpokes, + spokes: spokes, + } +} + +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. +func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. 
+ objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + +// PartialImplementationError represents an error due to partial conversion +// implementation such as hub without spokes, multiple hubs or spokes without hub. +type PartialImplementationError struct { + gvk schema.GroupVersionKind + hubs []runtime.Object + nonSpokes []runtime.Object + spokes []runtime.Object +} + +func (e PartialImplementationError) Error() string { + if len(e.hubs) == 0 { + return fmt.Sprintf("no hub defined for gvk %s", e.gvk) + } + if len(e.hubs) > 1 { + return fmt.Sprintf("multiple(%d) hubs defined for group-kind '%s' ", + len(e.hubs), e.gvk.GroupKind()) + } + if len(e.nonSpokes) > 0 { + return fmt.Sprintf("%d inconvertible types detected for group-kind '%s'", + len(e.nonSpokes), e.gvk.GroupKind()) + } + return "" +} + +// isHub determines if passed-in object is a Hub or not. +func isHub(obj runtime.Object) bool { + _, yes := obj.(conversion.Hub) + return yes +} + +// isConvertible determines if passed-in object is a convertible. +func isConvertible(obj runtime.Object) bool { + _, yes := obj.(conversion.Convertible) + return yes +} + +// helper to construct error response. +func errored(err error) *apix.ConversionResponse { + return &apix.ConversionResponse{ + Result: metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go new file mode 100644 index 0000000000..8a145cd978 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go @@ -0,0 +1,31 @@ +package conversion + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// Decoder knows how to decode the contents of a CRD version conversion +// request into a concrete object. +// TODO(droot): consider reusing decoder from admission pkg for this. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object. +func (d *Decoder) Decode(content []byte) (runtime.Object, *schema.GroupVersionKind, error) { + deserializer := d.codecs.UniversalDeserializer() + return deserializer.Decode(content, nil, nil) +} + +// DecodeInto decodes the inlined object in the into the passed-in runtime.Object. 
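The conversion Decoder above (NewDecoder and Decode, with DecodeInto continuing right after this note) is the whole decoding surface of the conversion webhook. Used by hand it looks roughly like the sketch below; the scheme and raw bytes are assumed to be supplied by the caller:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
)

// decodeRaw sketches using the conversion Decoder outside the webhook handler.
func decodeRaw(scheme *runtime.Scheme, raw []byte) (runtime.Object, error) {
	dec, err := conversion.NewDecoder(scheme)
	if err != nil {
		return nil, err
	}
	obj, gvk, err := dec.Decode(raw)
	if err != nil {
		return nil, err
	}
	_ = gvk // GVK parsed from apiVersion/kind; handleConvertRequest uses it to pick the target version
	return obj, nil
}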
+func (d *Decoder) DecodeInto(content []byte, into runtime.Object) error { + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, content, into) +} diff --git a/cmd/clusterctl/main.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go similarity index 65% rename from cmd/clusterctl/main.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go index 28372903d8..2c93f0d995 100644 --- a/cmd/clusterctl/main.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go @@ -14,18 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +/* +Package webhook provides methods to build and bootstrap a webhook server. + +Currently, it only supports admission webhooks. It will support CRD conversion webhooks in the near future. +*/ +package webhook import ( - "sigs.k8s.io/cluster-api-provider-openstack/pkg/deployer" - "sigs.k8s.io/cluster-api/cmd/clusterctl/cmd" - clustercommon "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -func init() { - clustercommon.RegisterClusterProvisioner("openstack", deployer.New()) -} - -func main() { - cmd.Execute() -} +var log = logf.RuntimeLog.WithName("webhook") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go new file mode 100644 index 0000000000..fbe757cc74 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher/certwatcher.go @@ -0,0 +1,162 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher + +import ( + "crypto/tls" + "sync" + + "gopkg.in/fsnotify.v1" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("certwatcher") + +// CertWatcher watches certificate and key files for changes. When either file +// changes, it reads and parses both and calls an optional callback with the new +// certificate. +type CertWatcher struct { + sync.Mutex + + currentCert *tls.Certificate + watcher *fsnotify.Watcher + + certPath string + keyPath string +} + +// New returns a new CertWatcher watching the given certificate and key. +func New(certPath, keyPath string) (*CertWatcher, error) { + var err error + + cw := &CertWatcher{ + certPath: certPath, + keyPath: keyPath, + } + + // Initial read of certificate and key. + if err := cw.ReadCertificate(); err != nil { + return nil, err + } + + cw.watcher, err = fsnotify.NewWatcher() + if err != nil { + return nil, err + } + + return cw, nil +} + +// GetCertificate fetches the currently loaded certificate, which may be nil. +func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cw.Lock() + defer cw.Unlock() + return cw.currentCert, nil +} + +// Start starts the watch on the certificate and key files. 
+func (cw *CertWatcher) Start(stopCh <-chan struct{}) error { + files := []string{cw.certPath, cw.keyPath} + + for _, f := range files { + if err := cw.watcher.Add(f); err != nil { + return err + } + } + + go cw.Watch() + + log.Info("Starting certificate watcher") + + // Block until the stop channel is closed. + <-stopCh + + return cw.watcher.Close() +} + +// Watch reads events from the watcher's channel and reacts to changes. +func (cw *CertWatcher) Watch() { + for { + select { + case event, ok := <-cw.watcher.Events: + // Channel is closed. + if !ok { + return + } + + cw.handleEvent(event) + + case err, ok := <-cw.watcher.Errors: + // Channel is closed. + if !ok { + return + } + + log.Error(err, "certificate watch error") + } + } +} + +// ReadCertificate reads the certificate and key files from disk, parses them, +// and updates the current certificate on the watcher. If a callback is set, it +// is invoked with the new certificate. +func (cw *CertWatcher) ReadCertificate() error { + cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath) + if err != nil { + return err + } + + cw.Lock() + cw.currentCert = &cert + cw.Unlock() + + log.Info("Updated current TLS certificate") + + return nil +} + +func (cw *CertWatcher) handleEvent(event fsnotify.Event) { + // Only care about events which may modify the contents of the file. + if !(isWrite(event) || isRemove(event) || isCreate(event)) { + return + } + + log.V(1).Info("certificate event", "event", event) + + // If the file was removed, re-add the watch. + if isRemove(event) { + if err := cw.watcher.Add(event.Name); err != nil { + log.Error(err, "error re-watching file") + } + } + + if err := cw.ReadCertificate(); err != nil { + log.Error(err, "error re-reading certificate") + } +} + +func isWrite(event fsnotify.Event) bool { + return event.Op&fsnotify.Write == fsnotify.Write +} + +func isCreate(event fsnotify.Event) bool { + return event.Op&fsnotify.Create == fsnotify.Create +} + +func isRemove(event fsnotify.Event) bool { + return event.Op&fsnotify.Remove == fsnotify.Remove +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go new file mode 100644 index 0000000000..c7ac97abac --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" +) + +const ( + certName = "tls.crt" + keyName = "tls.key" +) + +// DefaultPort is the default port that the webhook server serves. +var DefaultPort = 443 + +// Server is an admission webhook server that can serve traffic and +// generates related k8s resources for deploying. 
+type Server struct { + // Host is the address that the server will listen on. + // Defaults to "" - all addresses. + Host string + + // Port is the port number that the server will serve. + // It will be defaulted to 443 if unspecified. + Port int + + // CertDir is the directory that contains the server key and certificate. + // If using FSCertWriter in Provisioner, the server itself will provision the certificate and + // store it in this directory. + // If using SecretCertWriter in Provisioner, the server will provision the certificate in a secret, + // the user is responsible to mount the secret to the this location for the server to consume. + CertDir string + + // WebhookMux is the multiplexer that handles different webhooks. + WebhookMux *http.ServeMux + + // webhooks keep track of all registered webhooks for dependency injection, + // and to provide better panic messages on duplicate webhook registration. + webhooks map[string]http.Handler + + // setFields allows injecting dependencies from an external source + setFields inject.Func + + // defaultingOnce ensures that the default fields are only ever set once. + defaultingOnce sync.Once +} + +// setDefaults does defaulting for the Server. +func (s *Server) setDefaults() { + s.webhooks = map[string]http.Handler{} + if s.WebhookMux == nil { + s.WebhookMux = http.NewServeMux() + } + + if s.Port <= 0 { + s.Port = DefaultPort + } + + if len(s.CertDir) == 0 { + s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + } +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates +// the webhook server doesn't need leader election. +func (*Server) NeedLeaderElection() bool { + return false +} + +// Register marks the given webhook as being served at the given path. +// It panics if two hooks are registered on the same path. +func (s *Server) Register(path string, hook http.Handler) { + s.defaultingOnce.Do(s.setDefaults) + _, found := s.webhooks[path] + if found { + panic(fmt.Errorf("can't register duplicate path: %v", path)) + } + // TODO(directxman12): call setfields if we've already started the server + s.webhooks[path] = hook + s.WebhookMux.Handle(path, instrumentedHook(path, hook)) +} + +// instrumentedHook adds some instrumentation on top of the given webhook. +func instrumentedHook(path string, hookRaw http.Handler) http.Handler { + return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + startTS := time.Now() + defer func() { metrics.RequestLatency.WithLabelValues(path).Observe(time.Now().Sub(startTS).Seconds()) }() + hookRaw.ServeHTTP(resp, req) + + // TODO(directxman12): add back in metric about total requests broken down by result? + }) +} + +// Start runs the server. +// It will install the webhook related resources depend on the server configuration. +func (s *Server) Start(stop <-chan struct{}) error { + s.defaultingOnce.Do(s.setDefaults) + + baseHookLog := log.WithName("webhooks") + // inject fields here as opposed to in Register so that we're certain to have our setFields + // function available. + for hookPath, webhook := range s.webhooks { + if err := s.setFields(webhook); err != nil { + return err + } + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. 
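The remainder of Start (continued below) watches the serving certificate and serves TLS over the registered mux. From a consumer's point of view the interaction is usually limited to a couple of fields plus Register; a sketch assuming controller-runtime's manager, which exposes the server via GetWebhookServer, and an illustrative path:

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// wireWebhooks registers a handler on the manager-owned webhook server.
// The path is a placeholder; CertDir (defaulted above to a temp directory)
// must contain tls.crt and tls.key at serving time.
func wireWebhooks(mgr manager.Manager, handler admission.Handler) {
	srv := mgr.GetWebhookServer()
	srv.Port = 9443 // Port defaults to 443 when left unset
	srv.Register("/validate-placeholder", &webhook.Admission{Handler: handler})
}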
+ if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { + return err + } + } + + certPath := filepath.Join(s.CertDir, certName) + keyPath := filepath.Join(s.CertDir, keyName) + + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + + go func() { + if err := certWatcher.Start(stop); err != nil { + log.Error(err, "certificate watcher error") + } + }() + + cfg := &tls.Config{ + NextProtos: []string{"h2"}, + GetCertificate: certWatcher.GetCertificate, + } + + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(int(s.Port))), cfg) + if err != nil { + return err + } + + srv := &http.Server{ + Handler: s.WebhookMux, + } + + idleConnsClosed := make(chan struct{}) + go func() { + <-stop + + // TODO: use a context with reasonable timeout + if err := srv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout + log.Error(err, "error shutting down the HTTP server") + } + close(idleConnsClosed) + }() + + err = srv.Serve(listener) + if err != nil && err != http.ErrServerClosed { + return err + } + + <-idleConnsClosed + return nil +} + +// InjectFunc injects the field setter into the server. +func (s *Server) InjectFunc(f inject.Func) error { + s.setFields = f + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/types/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/types/webhook.go deleted file mode 100644 index 2ad1253f21..0000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/types/webhook.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -// WebhookType defines the type of a webhook -type WebhookType int - -const ( - _ = iota - // WebhookTypeMutating represents mutating type webhook - WebhookTypeMutating WebhookType = iota - // WebhookTypeValidating represents validating type webhook - WebhookTypeValidating -) diff --git a/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go b/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go index b28113ec06..5bdaa42078 100644 --- a/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go +++ b/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go @@ -16,195 +16,234 @@ limitations under the License. 
package main import ( + "encoding/json" "fmt" - "log" + "io" "os" - "path/filepath" + "strings" "github.com/spf13/cobra" - crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator" + + "sigs.k8s.io/controller-tools/pkg/crd" + "sigs.k8s.io/controller-tools/pkg/deepcopy" + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/genall/help" + prettyhelp "sigs.k8s.io/controller-tools/pkg/genall/help/pretty" + "sigs.k8s.io/controller-tools/pkg/markers" "sigs.k8s.io/controller-tools/pkg/rbac" "sigs.k8s.io/controller-tools/pkg/webhook" ) -func main() { - rootCmd := &cobra.Command{ - Use: "controller-gen", - Short: "A reference implementation generation tool for Kubernetes APIs.", - Long: `A reference implementation generation tool for Kubernetes APIs.`, - Example: ` # Generate RBAC manifests for a project - controller-gen rbac - - # Generate CRD manifests for a project - controller-gen crd - - # Run all the generators for a given project - controller-gen all -`, +//go:generate go run ../helpgen/main.go paths=../../pkg/... generate:headerFile=../../boilerplate.go.txt,year=2019 + +// Options are specified to controller-gen by turning generators and output rules into +// markers, and then parsing them using the standard registry logic (without the "+"). +// Each marker and output rule should thus be usable as a marker target. + +var ( + // allGenerators maintains the list of all known generators, giving + // them names for use on the command line. + // each turns into a command line option, + // and has options for output forms. + allGenerators = map[string]genall.Generator{ + "crd": crd.Generator{}, + "rbac": rbac.Generator{}, + "object": deepcopy.Generator{}, + "webhook": webhook.Generator{}, } - rootCmd.AddCommand( - newRBACCmd(), - newCRDCmd(), - newWebhookCmd(), - newAllSubCmd(), - ) - - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) + // allOutputRules defines the list of all known output rules, giving + // them names for use on the command line. + // Each output rule turns into two command line options: + // - output::

(per-generator output) + // - output: (default output) + allOutputRules = map[string]genall.OutputRule{ + "dir": genall.OutputToDirectory(""), + "none": genall.OutputToNothing, + "stdout": genall.OutputToStdout, + "artifacts": genall.OutputArtifacts{}, } -} -func newRBACCmd() *cobra.Command { - o := &rbac.ManifestOptions{} - o.SetDefaults() + // optionsRegistry contains all the marker definitions used to process command line options + optionsRegistry = &markers.Registry{} +) - cmd := &cobra.Command{ - Use: "rbac", - Short: "Generates RBAC manifests", - Long: `Generate RBAC manifests from the RBAC annotations in Go source files. -Usage: -# controller-gen rbac [--name manager] [--input-dir input_dir] [--output-dir output_dir] -`, - Run: func(_ *cobra.Command, _ []string) { - if err := rbac.Generate(o); err != nil { - log.Fatal(err) +func init() { + for genName, gen := range allGenerators { + // make the generator options marker itself + defn := markers.Must(markers.MakeDefinition(genName, markers.DescribesPackage, gen)) + if err := optionsRegistry.Register(defn); err != nil { + panic(err) + } + if helpGiver, hasHelp := gen.(genall.HasHelp); hasHelp { + if help := helpGiver.Help(); help != nil { + optionsRegistry.AddHelp(defn, help) } - fmt.Printf("RBAC manifests generated under '%s' directory\n", o.OutputDir) - }, - } + } - f := cmd.Flags() - f.StringVar(&o.Name, "name", o.Name, "name to be used as prefix in identifier for manifests") - f.StringVar(&o.ServiceAccount, "service-account", o.ServiceAccount, "service account to bind the role to") - f.StringVar(&o.Namespace, "service-account-namespace", o.Namespace, "namespace of the service account to bind the role to") - f.StringVar(&o.InputDir, "input-dir", o.InputDir, "input directory pointing to Go source files") - f.StringVar(&o.OutputDir, "output-dir", o.OutputDir, "output directory where generated manifests will be saved") - f.StringVar(&o.RoleFile, "role-file", o.RoleFile, "output file for the role manifest") - f.StringVar(&o.BindingFile, "binding-file", o.BindingFile, "output file for the role binding manifest") - - return cmd -} - -func newCRDCmd() *cobra.Command { - g := &crdgenerator.Generator{} - - cmd := &cobra.Command{ - Use: "crd", - Short: "Generates CRD manifests", - Long: `Generate CRD manifests from the Type definitions in Go source files. 
-Usage: -# controller-gen crd [--domain k8s.io] [--root-path input_dir] [--output-dir output_dir] -`, - Run: func(_ *cobra.Command, _ []string) { - if err := g.ValidateAndInitFields(); err != nil { - log.Fatal(err) + // make per-generation output rule markers + for ruleName, rule := range allOutputRules { + ruleMarker := markers.Must(markers.MakeDefinition(fmt.Sprintf("output:%s:%s", genName, ruleName), markers.DescribesPackage, rule)) + if err := optionsRegistry.Register(ruleMarker); err != nil { + panic(err) } - if err := g.Do(); err != nil { - log.Fatal(err) + if helpGiver, hasHelp := rule.(genall.HasHelp); hasHelp { + if help := helpGiver.Help(); help != nil { + optionsRegistry.AddHelp(ruleMarker, help) + } } - fmt.Printf("CRD files generated, files can be found under path %s.\n", g.OutputDir) - }, + } } - f := cmd.Flags() - f.StringVar(&g.RootPath, "root-path", "", "working dir, must have PROJECT file under the path or parent path if domain not set") - f.StringVar(&g.OutputDir, "output-dir", "", "output directory, default to 'config/crds' under root path") - f.StringVar(&g.Domain, "domain", "", "domain of the resources, will try to fetch it from PROJECT file if not specified") - f.StringVar(&g.Namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set") - f.BoolVar(&g.SkipMapValidation, "skip-map-validation", true, "if set to true, skip generating OpenAPI validation schema for map type in CRD.") - f.StringVar(&g.APIsPath, "apis-path", "pkg/apis", "the path to search for apis relative to the current directory") - f.StringVar(&g.APIsPkg, "apis-pkg", "", "the absolute Go pkg name for current project's api pkg.") + // make "default output" output rule markers + for ruleName, rule := range allOutputRules { + ruleMarker := markers.Must(markers.MakeDefinition("output:"+ruleName, markers.DescribesPackage, rule)) + if err := optionsRegistry.Register(ruleMarker); err != nil { + panic(err) + } + if helpGiver, hasHelp := rule.(genall.HasHelp); hasHelp { + if help := helpGiver.Help(); help != nil { + optionsRegistry.AddHelp(ruleMarker, help) + } + } + } - return cmd + // add in the common options markers + if err := genall.RegisterOptionsMarkers(optionsRegistry); err != nil { + panic(err) + } } -func newAllSubCmd() *cobra.Command { - var ( - projectDir, namespace string - ) +// noUsageError suppresses usage printing when it occurs +// (since cobra doesn't provide a good way to avoid printing +// out usage in only certain situations). +type noUsageError struct{ error } + +func main() { + helpLevel := 0 + whichLevel := 0 cmd := &cobra.Command{ - Use: "all", - Short: "runs all generators for a project", - Long: `Run all available generators for a given project -Usage: -# controller-gen all + Use: "controller-gen", + Short: "Generate Kubernetes API extension resources and code.", + Long: "Generate Kubernetes API extension resources and code.", + Example: ` # Generate RBAC manifests and crds for all types under apis/, + # outputting crds to /tmp/crds and everything else to stdout + controller-gen rbac:roleName= crd paths=./apis/... output:crd:dir=/tmp/crds output:stdout + + # Generate deepcopy/runtime.Object implementations for a particular file + controller-gen object paths=./apis/v1beta1/some_types.go + + # Run all the generators for a given project + controller-gen paths=./apis/... 
+ + # Explain the markers for generating CRDs, and their arguments + controller-gen crd -ww `, - Run: func(_ *cobra.Command, _ []string) { - if projectDir == "" { - currDir, err := os.Getwd() - if err != nil { - log.Fatalf("project-dir missing, failed to use current directory: %v", err) - } - projectDir = currDir - } - crdGen := &crdgenerator.Generator{ - RootPath: projectDir, - OutputDir: filepath.Join(projectDir, "config", "crds"), - Namespace: namespace, - SkipMapValidation: true, + RunE: func(c *cobra.Command, rawOpts []string) error { + // print the help if we asked for it (since we've got a different help flag :-/), then bail + if helpLevel > 0 { + return c.Usage() } - if err := crdGen.ValidateAndInitFields(); err != nil { - log.Fatal(err) - } - if err := crdGen.Do(); err != nil { - log.Fatal(err) + + // print the marker docs if we asked for them, then bail + if whichLevel > 0 { + return printMarkerDocs(c, rawOpts, whichLevel) } - fmt.Printf("CRD manifests generated under '%s' \n", crdGen.OutputDir) - // RBAC generation - rbacOptions := &rbac.ManifestOptions{} - rbacOptions.SetDefaults() - if err := rbac.Generate(rbacOptions); err != nil { - log.Fatal(err) + // otherwise, set up the runtime for actually running the generators + rt, err := genall.FromOptions(optionsRegistry, rawOpts) + if err != nil { + return err } - fmt.Printf("RBAC manifests generated under '%s' \n", rbacOptions.OutputDir) - - o := &webhook.Options{ - WriterOptions: webhook.WriterOptions{ - InputDir: filepath.Join(projectDir, "pkg"), - OutputDir: filepath.Join(projectDir, "config", "webhook"), - PatchOutputDir: filepath.Join(projectDir, "config", "default"), - }, + if len(rt.Generators) == 0 { + return fmt.Errorf("no generators specified") } - o.SetDefaults() - if err := webhook.Generate(o); err != nil { - log.Fatal(err) + + if hadErrs := rt.Run(); hadErrs { + // don't obscure the actual error with a bunch of usage + return noUsageError{fmt.Errorf("not all generators ran successfully")} } - fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir) + return nil }, + SilenceUsage: true, // silence the usage, then print it out ourselves if it wasn't suppressed + } + cmd.Flags().CountVarP(&whichLevel, "which-markers", "w", "print out all markers available with the requested generators\n(up to -www for the most detailed output, or -wwww for json output)") + cmd.Flags().CountVarP(&helpLevel, "detailed-help", "h", "print out more detailed help\n(up to -hhh for the most detailed output, or -hhhh for json output)") + cmd.Flags().Bool("help", false, "print out usage and a summary of options") + oldUsage := cmd.UsageFunc() + cmd.SetUsageFunc(func(c *cobra.Command) error { + if err := oldUsage(c); err != nil { + return err + } + if helpLevel == 0 { + helpLevel = summaryHelp + } + fmt.Fprintf(c.OutOrStderr(), "\n\nOptions\n\n") + return helpForLevels(c.OutOrStdout(), c.OutOrStderr(), helpLevel, optionsRegistry, help.SortByOption) + }) + + if err := cmd.Execute(); err != nil { + if _, noUsage := err.(noUsageError); !noUsage { + // print the usage unless we suppressed it + if err := cmd.Usage(); err != nil { + panic(err) + } + } + fmt.Fprintf(cmd.OutOrStderr(), "run `%[1]s %[2]s -w` to see all available markers, or `%[1]s %[2]s -h` for usage\n", cmd.CalledAs(), strings.Join(os.Args[1:], " ")) + os.Exit(1) } - f := cmd.Flags() - f.StringVar(&projectDir, "project-dir", "", "project directory, it must have PROJECT file") - f.StringVar(&namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not 
set") - return cmd } -func newWebhookCmd() *cobra.Command { - o := &webhook.Options{} - o.SetDefaults() - - cmd := &cobra.Command{ - Use: "webhook", - Short: "Generates webhook related manifests", - Long: `Generate webhook related manifests from the webhook annotations in Go source files. -Usage: -# controller-gen webhook [--input-dir input_dir] [--output-dir output_dir] [--patch-output-dir patch-output_dir] -`, - Run: func(_ *cobra.Command, _ []string) { - if err := webhook.Generate(o); err != nil { - log.Fatal(err) - } - fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir) - }, +// printMarkerDocs prints out marker help for the given generators specified in +// the rawOptions, at the given level. +func printMarkerDocs(c *cobra.Command, rawOptions []string, whichLevel int) error { + // just grab a registry so we don't lag while trying to load roots + // (like we'd do if we just constructed the full runtime). + reg, err := genall.RegistryFromOptions(optionsRegistry, rawOptions) + if err != nil { + return err } - f := cmd.Flags() - f.StringVar(&o.InputDir, "input-dir", o.InputDir, "input directory pointing to Go source files") - f.StringVar(&o.OutputDir, "output-dir", o.OutputDir, "output directory where generated manifests will be saved.") - f.StringVar(&o.PatchOutputDir, "patch-output-dir", o.PatchOutputDir, "output directory where generated kustomize patch will be saved.") + return helpForLevels(c.OutOrStdout(), c.OutOrStderr(), whichLevel, reg, help.SortByCategory) +} - return cmd +func helpForLevels(mainOut io.Writer, errOut io.Writer, whichLevel int, reg *markers.Registry, sorter help.SortGroup) error { + helpInfo := help.ByCategory(reg, sorter) + switch whichLevel { + case jsonHelp: + if err := json.NewEncoder(mainOut).Encode(helpInfo); err != nil { + return err + } + case detailedHelp, fullHelp: + fullDetail := whichLevel == fullHelp + for _, cat := range helpInfo { + if cat.Category == "" { + continue + } + contents := prettyhelp.MarkersDetails(fullDetail, cat.Category, cat.Markers) + if err := contents.WriteTo(errOut); err != nil { + return err + } + } + case summaryHelp: + for _, cat := range helpInfo { + if cat.Category == "" { + continue + } + contents := prettyhelp.MarkersSummary(cat.Category, cat.Markers) + if err := contents.WriteTo(errOut); err != nil { + return err + } + } + } + return nil } + +const ( + _ = iota + summaryHelp + detailedHelp + fullHelp + jsonHelp +) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go new file mode 100644 index 0000000000..df424e4d63 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "strings" + "unicode" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +// TruncateDescription truncates the description of fields in given schema if it +// exceeds maxLen. 
+// It tries to chop off the description at the closest sentence boundary. +func TruncateDescription(schema *apiext.JSONSchemaProps, maxLen int) { + EditSchema(schema, descVisitor{maxLen: maxLen}) +} + +// descVisitor recursively visits all fields in the schema and truncates the +// description of the fields to specified maxLen. +type descVisitor struct { + // maxLen is the maximum allowed length for decription of a field + maxLen int +} + +func (v descVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor { + if schema == nil { + return v + } + if v.maxLen < 0 { + return nil /* no further work to be done for this schema */ + } + if v.maxLen == 0 { + schema.Description = "" + return v + } + if len(schema.Description) > v.maxLen { + schema.Description = truncateString(schema.Description, v.maxLen) + return v + } + return v +} + +// truncateString truncates given desc string if it exceeds maxLen. It may +// return string with length less than maxLen even in cases where original desc +// exceeds maxLen because it tries to chop off the desc at the closest sentence +// boundary to avoid incomplete sentences. +func truncateString(desc string, maxLen int) string { + desc = desc[0:maxLen] + + // Trying to chop off at closest sentence boundary. + if n := strings.LastIndexFunc(desc, isSentenceTerminal); n > 0 { + return desc[0 : n+1] + } + // TODO(droot): Improve the logic to chop off at closest word boundary + // or add elipses (...) to indicate that it's chopped incase no closest + // sentence found within maxLen. + return desc +} + +// helper function to determine if given rune is a sentence terminal or not. +func isSentenceTerminal(r rune) bool { + return unicode.Is(unicode.STerm, r) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go new file mode 100644 index 0000000000..fd48e44aec --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package crd contains utilities for generating CustomResourceDefinitions and +// their corresponding OpenAPI validation schemata. +// +// Markers +// +// Markers live under the markers subpackage. Two types of markers exist: +// those that modify schema generation (for validation), and those that modify +// the rest of the CRD. See the subpackage for more information and all +// supported markers. +// +// Collecting Types and Generating CRDs +// +// The Parser is the entrypoint for collecting the information required to +// generate CRDs. Like loader and collector, its methods are idemptotent, not +// doing extra work if called multiple times. +// +// Parser's method start with Need. Calling NeedXYZ indicates that XYZ should +// be made present in the eqivalent field in the Parser, where it can then be +// loaded from. Each Need method will in turn call Need on anything it needs. +// +// In general, root packages should first be loaded into the Parser with +// NeedPackage. 
Then, CRDs can be generated with NeedCRDFor. +// +// Errors are generally attached directly to the relevant Package with +// AddError. +// +// Known Packages +// +// There are a few types from Kubernetes that have special meaning, but don't +// have validation markers attached. Those specific types have overrides +// listed in KnownPackages that can be added as overrides to any parser. +// +// Flattening +// +// Once schemata are generated, they can be used directly by external tooling +// (like JSONSchema validators), but must first be "flattened" to not contain +// references before use in a CRD (Kubernetes doesn't allow references in the +// CRD's validation schema). +// +// The Flattener built in to the Parser takes care of flattening out references +// when requesting the CRDs, but can be invoked manually. It will not modify +// the input schemata. +// +// Flattened schemata may further be passed to FlattenEmbedded to remove the +// use of AllOf (which is used to describe embedded struct fields when +// references are in use). This done automatically when fetching CRDs. +package crd diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go new file mode 100644 index 0000000000..a764a41fe5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go @@ -0,0 +1,441 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "reflect" + "sort" + "strings" + "sync" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// ErrorRecorder knows how to record errors. It wraps the part of +// pkg/loader.Package that we need to record errors in places were it might not +// make sense to have a loader.Package +type ErrorRecorder interface { + // AddError records that the given error occurred. + // See the documentation on loader.Package.AddError for more information. + AddError(error) +} + +// isOrNil checks if val is nil if val is of a nillable type, otherwise, +// it compares val to valInt (which should probably be the zero value). +func isOrNil(val reflect.Value, valInt interface{}, zeroInt interface{}) bool { + switch valKind := val.Kind(); valKind { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return val.IsNil() + default: + return valInt == zeroInt + } +} + +// flattenAllOfInto copies properties from src to dst, then copies the properties +// of each item in src's allOf to dst's properties as well. 
+func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, errRec ErrorRecorder) { + if len(src.AllOf) > 0 { + for _, embedded := range src.AllOf { + flattenAllOfInto(dst, embedded, errRec) + } + } + + dstVal := reflect.Indirect(reflect.ValueOf(dst)) + srcVal := reflect.ValueOf(src) + typ := dstVal.Type() + + srcRemainder := apiext.JSONSchemaProps{} + srcRemVal := reflect.Indirect(reflect.ValueOf(&srcRemainder)) + dstRemainder := apiext.JSONSchemaProps{} + dstRemVal := reflect.Indirect(reflect.ValueOf(&dstRemainder)) + hoisted := false + + for i := 0; i < srcVal.NumField(); i++ { + fieldName := typ.Field(i).Name + switch fieldName { + case "AllOf": + // don't merge because we deal with it above + continue + case "Title", "Description", "Example", "ExternalDocs": + // don't merge because we pre-merge to properly preserve field docs + continue + } + srcField := srcVal.Field(i) + fldTyp := srcField.Type() + zeroVal := reflect.Zero(fldTyp) + zeroInt := zeroVal.Interface() + srcInt := srcField.Interface() + + if isOrNil(srcField, srcInt, zeroInt) { + // nothing to copy from src, continue + continue + } + + dstField := dstVal.Field(i) + dstInt := dstField.Interface() + if isOrNil(dstField, dstInt, zeroInt) { + // dst is empty, continue + dstField.Set(srcField) + continue + } + + if fldTyp.Comparable() && srcInt == dstInt { + // same value, continue + continue + } + + // resolve conflict + switch fieldName { + case "Properties": + // merge if possible, use all of otherwise + srcMap := srcInt.(map[string]apiext.JSONSchemaProps) + dstMap := dstInt.(map[string]apiext.JSONSchemaProps) + + for k, v := range srcMap { + dstProp, exists := dstMap[k] + if !exists { + dstMap[k] = v + continue + } + flattenAllOfInto(&dstProp, v, errRec) + dstMap[k] = dstProp + } + case "Required": + // merge + dstField.Set(reflect.AppendSlice(dstField, srcField)) + case "Type": + if srcInt != dstInt { + // TODO(directxman12): figure out how to attach this back to a useful point in the Go source or in the schema + errRec.AddError(fmt.Errorf("conflicting types in allOf branches in schema: %s vs %s", dstInt, srcInt)) + } + // keep the destination value, for now + // TODO(directxman12): Default -- use field? + // TODO(directxman12): + // - Dependencies: if field x is present, then either schema validates or all props are present + // - AdditionalItems: like AdditionalProperties + // - Definitions: common named validation sets that can be references (merge, bail if duplicate) + case "AdditionalProperties": + // as of the time of writing, `allows: false` is not allowed, so we don't have to handle it + srcProps := srcInt.(*apiext.JSONSchemaPropsOrBool) + if srcProps.Schema == nil { + // nothing to merge + continue + } + dstProps := dstInt.(*apiext.JSONSchemaPropsOrBool) + if dstProps.Schema == nil { + dstProps.Schema = &apiext.JSONSchemaProps{} + } + flattenAllOfInto(dstProps.Schema, *srcProps.Schema, errRec) + // NB(directxman12): no need to explicitly handle nullable -- false is considered to be the zero value + // TODO(directxman12): src isn't necessarily the field value -- it's just the most recent allOf entry + default: + // hoist into allOf... 
+ hoisted = true + + srcRemVal.Field(i).Set(srcField) + dstRemVal.Field(i).Set(dstField) + // ...and clear the original + dstField.Set(zeroVal) + } + } + + if hoisted { + dst.AllOf = append(dst.AllOf, dstRemainder, srcRemainder) + } + + // dedup required + if len(dst.Required) > 0 { + reqUniq := make(map[string]struct{}) + for _, req := range dst.Required { + reqUniq[req] = struct{}{} + } + dst.Required = make([]string, 0, len(reqUniq)) + for req := range reqUniq { + dst.Required = append(dst.Required, req) + } + // be deterministic + sort.Strings(dst.Required) + } +} + +// allOfVisitor recursively visits allOf fields in the schema, +// merging nested allOf properties into the root schema. +type allOfVisitor struct { + // errRec is used to record errors while flattening (like two conflicting + // field values used in an allOf) + errRec ErrorRecorder +} + +func (v *allOfVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor { + if schema == nil { + return v + } + + // clear this now so that we can safely preserve edits made my flattenAllOfInto + origAllOf := schema.AllOf + schema.AllOf = nil + + for _, embedded := range origAllOf { + flattenAllOfInto(schema, embedded, v.errRec) + } + return v +} + +// NB(directxman12): FlattenEmbedded is separate from Flattener because +// some tooling wants to flatten out embedded fields, but only actually +// flatten a few specific types first. + +// FlattenEmbedded flattens embedded fields (represented via AllOf) which have +// already had their references resolved into simple properties in the containing +// schema. +func FlattenEmbedded(schema *apiext.JSONSchemaProps, errRec ErrorRecorder) *apiext.JSONSchemaProps { + outSchema := schema.DeepCopy() + EditSchema(outSchema, &allOfVisitor{errRec: errRec}) + return outSchema +} + +// Flattener knows how to take a root type, and flatten all references in it +// into a single, flat type. Flattened types are cached, so it's relatively +// cheap to make repeated calls with the same type. +type Flattener struct { + // Parser is used to lookup package and type details, and parse in new packages. + Parser *Parser + + LookupReference func(ref string, contextPkg *loader.Package) (TypeIdent, error) + + // flattenedTypes hold the flattened version of each seen type for later reuse. + flattenedTypes map[TypeIdent]apiext.JSONSchemaProps + initOnce sync.Once +} + +func (f *Flattener) init() { + f.initOnce.Do(func() { + f.flattenedTypes = make(map[TypeIdent]apiext.JSONSchemaProps) + if f.LookupReference == nil { + f.LookupReference = identFromRef + } + }) +} + +// cacheType saves the flattened version of the given type for later reuse +func (f *Flattener) cacheType(typ TypeIdent, schema apiext.JSONSchemaProps) { + f.init() + f.flattenedTypes[typ] = schema +} + +// loadUnflattenedSchema fetches a fresh, unflattened schema from the parser. +func (f *Flattener) loadUnflattenedSchema(typ TypeIdent) (*apiext.JSONSchemaProps, error) { + f.Parser.NeedSchemaFor(typ) + + baseSchema, found := f.Parser.Schemata[typ] + if !found { + return nil, fmt.Errorf("unable to locate schema for type %s", typ) + } + return &baseSchema, nil +} + +// FlattenType flattens the given pre-loaded type, removing any references from it. +// It deep-copies the schema first, so it won't affect the parser's version of the schema. 
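FlattenType, whose body follows this note, is the entry point callers use to obtain a reference-free schema. Driven directly it looks roughly like the sketch below; the parser, package, and type name are assumed to come from an earlier loading step:

package example

import (
	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"sigs.k8s.io/controller-tools/pkg/crd"
	"sigs.k8s.io/controller-tools/pkg/loader"
)

// flattenFor returns the flattened schema for the named type in the given package.
func flattenFor(parser *crd.Parser, pkg *loader.Package, typeName string) *apiext.JSONSchemaProps {
	flattener := &crd.Flattener{Parser: parser}
	return flattener.FlattenType(crd.TypeIdent{Package: pkg, Name: typeName})
}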
+func (f *Flattener) FlattenType(typ TypeIdent) *apiext.JSONSchemaProps { + f.init() + if cachedSchema, isCached := f.flattenedTypes[typ]; isCached { + return &cachedSchema + } + baseSchema, err := f.loadUnflattenedSchema(typ) + if err != nil { + typ.Package.AddError(err) + return nil + } + resSchema := f.FlattenSchema(*baseSchema, typ.Package) + f.cacheType(typ, *resSchema) + return resSchema +} + +// FlattenSchema flattens the given schema, removing any references. +// It deep-copies the schema first, so the input schema won't be affected. +func (f *Flattener) FlattenSchema(baseSchema apiext.JSONSchemaProps, currentPackage *loader.Package) *apiext.JSONSchemaProps { + resSchema := baseSchema.DeepCopy() + EditSchema(resSchema, &flattenVisitor{ + Flattener: f, + currentPackage: currentPackage, + }) + + return resSchema +} + +// RefParts splits a reference produced by the schema generator into its component +// type name and package name (if it's a cross-package reference). Note that +// referenced packages *must* be looked up relative to the current package. +func RefParts(ref string) (typ string, pkgName string, err error) { + if !strings.HasPrefix(ref, defPrefix) { + return "", "", fmt.Errorf("non-standard reference link %q", ref) + } + ref = ref[len(defPrefix):] + // decode the json pointer encodings + ref = strings.Replace(ref, "~1", "/", -1) + ref = strings.Replace(ref, "~0", "~", -1) + nameParts := strings.SplitN(ref, "~", 2) + + if len(nameParts) == 1 { + // local reference + return nameParts[0], "", nil + } + // cross-package reference + return nameParts[1], nameParts[0], nil +} + +// identFromRef converts the given schema ref from the given package back +// into the TypeIdent that it represents. +func identFromRef(ref string, contextPkg *loader.Package) (TypeIdent, error) { + typ, pkgName, err := RefParts(ref) + if err != nil { + return TypeIdent{}, err + } + + if pkgName == "" { + // a local reference + return TypeIdent{ + Name: typ, + Package: contextPkg, + }, nil + } + + // an external reference + return TypeIdent{ + Name: typ, + Package: contextPkg.Imports()[pkgName], + }, nil +} + +// preserveFields copies documentation fields from src into dst, preserving +// field-level documentation when flattening, and preserving field-level validation +// as allOf entries. +func preserveFields(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps) { + srcDesc := src.Description + srcTitle := src.Title + srcExDoc := src.ExternalDocs + srcEx := src.Example + + src.Description, src.Title, src.ExternalDocs, src.Example = "", "", nil, nil + + src.Ref = nil + *dst = apiext.JSONSchemaProps{ + AllOf: []apiext.JSONSchemaProps{*dst, src}, + + // keep these, in case the source field doesn't specify anything useful + Description: dst.Description, + Title: dst.Title, + ExternalDocs: dst.ExternalDocs, + Example: dst.Example, + } + + if srcDesc != "" { + dst.Description = srcDesc + } + if srcTitle != "" { + dst.Title = srcTitle + } + if srcExDoc != nil { + dst.ExternalDocs = srcExDoc + } + if srcEx != nil { + dst.Example = srcEx + } +} + +// flattenVisitor visits each node in the schema, recursively flattening references. 
+type flattenVisitor struct { + *Flattener + + currentPackage *loader.Package + currentType *TypeIdent + currentSchema *apiext.JSONSchemaProps + originalField apiext.JSONSchemaProps +} + +func (f *flattenVisitor) Visit(baseSchema *apiext.JSONSchemaProps) SchemaVisitor { + if baseSchema == nil { + // end-of-node marker, cache the results + if f.currentType != nil { + f.cacheType(*f.currentType, *f.currentSchema) + // preserve field information *after* caching so that we don't + // accidentally cache field-level information onto the schema for + // the type in general. + preserveFields(f.currentSchema, f.originalField) + } + return f + } + + // if we get a type that's just a ref, resolve it + if baseSchema.Ref != nil && len(*baseSchema.Ref) > 0 { + // resolve this ref + refIdent, err := f.LookupReference(*baseSchema.Ref, f.currentPackage) + if err != nil { + f.currentPackage.AddError(err) + return nil + } + + // load and potentially flatten the schema + + // check the cache first... + if refSchemaCached, isCached := f.flattenedTypes[refIdent]; isCached { + // shallow copy is fine, it's just to avoid overwriting the doc fields + preserveFields(&refSchemaCached, *baseSchema) + *baseSchema = refSchemaCached + return nil // don't recurse, we're done + } + + // ...otherwise, we need to flatten + refSchema, err := f.loadUnflattenedSchema(refIdent) + if err != nil { + f.currentPackage.AddError(err) + return nil + } + refSchema = refSchema.DeepCopy() + + // keep field around to preserve field-level validation, docs, etc + origField := *baseSchema + *baseSchema = *refSchema + + // avoid loops (which shouldn't exist, but just in case) + // by marking a nil cached pointer before we start recursing + f.cacheType(refIdent, apiext.JSONSchemaProps{}) + + return &flattenVisitor{ + Flattener: f.Flattener, + + currentPackage: refIdent.Package, + currentType: &refIdent, + currentSchema: baseSchema, + originalField: origField, + } + } + + // otherwise, continue recursing... + if f.currentType != nil { + // ...but don't accidentally end this node early (for caching purposes) + return &flattenVisitor{ + Flattener: f.Flattener, + currentPackage: f.currentPackage, + } + } + + return f +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go new file mode 100644 index 0000000000..de4a00f192 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go @@ -0,0 +1,189 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "go/types" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + + crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers" + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// +controllertools:marker:generateHelp + +// Generator generates CustomResourceDefinition objects. 
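The Generator defined next turns marker-annotated Go types into CRD YAML. Its input looks like the illustrative type below: the kubebuilder markers drive validation and subresources, and the embedded TypeMeta/ObjectMeta is what findKubeKinds (further below) uses to recognise root objects. The Widget kind and its fields are hypothetical, not taken from this repository:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// Widget is a hypothetical root object the CRD generator would pick up.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec,omitempty"`
	Status WidgetStatus `json:"status,omitempty"`
}

// WidgetSpec shows field-level validation markers consumed by the schema generator.
type WidgetSpec struct {
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=10
	Replicas int `json:"replicas"`
}

// WidgetStatus backs the status subresource declared above.
type WidgetStatus struct {
	Ready bool `json:"ready,omitempty"`
}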
+type Generator struct { + // TrivialVersions indicates that we should produce a single-version CRD. + // + // Single "trivial-version" CRDs are compatible with older (pre 1.13) + // Kubernetes API servers. The storage version's schema will be used as + // the CRD's schema. + TrivialVersions bool `marker:",optional"` + + // MaxDescLen specifies the maximum description length for fields in CRD's OpenAPI schema. + // + // 0 indicates drop the description for all fields completely. + // n indicates limit the description to at most n characters and truncate the description to + // closest sentence boundary if it exceeds n characters. + MaxDescLen *int `marker:",optional"` +} + +func (Generator) RegisterMarkers(into *markers.Registry) error { + return crdmarkers.Register(into) +} +func (g Generator) Generate(ctx *genall.GenerationContext) error { + parser := &Parser{ + Collector: ctx.Collector, + Checker: ctx.Checker, + } + + AddKnownTypes(parser) + for _, root := range ctx.Roots { + parser.NeedPackage(root) + } + + metav1Pkg := findMetav1(ctx.Roots) + if metav1Pkg == nil { + // no objects in the roots, since nothing imported metav1 + return nil + } + + // TODO: allow selecting a specific object + kubeKinds := findKubeKinds(parser, metav1Pkg) + if len(kubeKinds) == 0 { + // no objects in the roots + return nil + } + + for _, groupKind := range kubeKinds { + parser.NeedCRDFor(groupKind, g.MaxDescLen) + crd := parser.CustomResourceDefinitions[groupKind] + if g.TrivialVersions { + toTrivialVersions(&crd) + } + fileName := fmt.Sprintf("%s_%s.yaml", crd.Spec.Group, crd.Spec.Names.Plural) + if err := ctx.WriteYAML(fileName, crd); err != nil { + return err + } + } + + return nil +} + +// toTrivialVersions strips out all schemata except for the storage schema, +// and moves that up into the root object. This makes the CRD compatible +// with pre 1.13 clusters. +func toTrivialVersions(crd *apiext.CustomResourceDefinition) { + var canonicalSchema *apiext.CustomResourceValidation + var canonicalSubresources *apiext.CustomResourceSubresources + var canonicalColumns []apiext.CustomResourceColumnDefinition + for i, ver := range crd.Spec.Versions { + if ver.Storage == true { + canonicalSchema = ver.Schema + canonicalSubresources = ver.Subresources + canonicalColumns = ver.AdditionalPrinterColumns + } + crd.Spec.Versions[i].Schema = nil + crd.Spec.Versions[i].Subresources = nil + crd.Spec.Versions[i].AdditionalPrinterColumns = nil + } + if canonicalSchema == nil { + return + } + + crd.Spec.Validation = canonicalSchema + crd.Spec.Subresources = canonicalSubresources + crd.Spec.AdditionalPrinterColumns = canonicalColumns +} + +// findMetav1 locates the actual package representing metav1 amongst +// the imports of the roots. +func findMetav1(roots []*loader.Package) *loader.Package { + for _, root := range roots { + pkg := root.Imports()["k8s.io/apimachinery/pkg/apis/meta/v1"] + if pkg != nil { + return pkg + } + } + return nil +} + +// findKubeKinds locates all types that contain TypeMeta and ObjectMeta +// (and thus may be a Kubernetes object), and returns the corresponding +// group-kinds. 
+func findKubeKinds(parser *Parser, metav1Pkg *loader.Package) []schema.GroupKind { + // TODO(directxman12): technically, we should be finding metav1 per-package + var kubeKinds []schema.GroupKind + for typeIdent, info := range parser.Types { + hasObjectMeta := false + hasTypeMeta := false + + pkg := typeIdent.Package + pkg.NeedTypesInfo() + typesInfo := pkg.TypesInfo + + for _, field := range info.Fields { + if field.Name != "" { + // type and object meta are embedded, + // so they can't be this + continue + } + + fieldType := typesInfo.TypeOf(field.RawField.Type) + namedField, isNamed := fieldType.(*types.Named) + if !isNamed { + // ObjectMeta and TypeMeta are named types + continue + } + if namedField.Obj().Pkg() == nil { + // Embedded non-builtin universe type (specifically, it's probably `error`), + // so it can't be ObjectMeta or TypeMeta + continue + } + fieldPkgPath := loader.NonVendorPath(namedField.Obj().Pkg().Path()) + fieldPkg := pkg.Imports()[fieldPkgPath] + if fieldPkg != metav1Pkg { + continue + } + + switch namedField.Obj().Name() { + case "ObjectMeta": + hasObjectMeta = true + case "TypeMeta": + hasTypeMeta = true + } + } + + if !hasObjectMeta || !hasTypeMeta { + continue + } + + groupKind := schema.GroupKind{ + Group: parser.GroupVersions[pkg].Group, + Kind: typeIdent.Name, + } + kubeKinds = append(kubeKinds, groupKind) + } + + return kubeKinds +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go deleted file mode 100644 index dfbf152789..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator - -import ( - "fmt" - "log" - "os" - "path" - "strings" - - "github.com/ghodss/yaml" - "github.com/spf13/afero" - extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/gengo/args" - "k8s.io/gengo/types" - crdutil "sigs.k8s.io/controller-tools/pkg/crd/util" - "sigs.k8s.io/controller-tools/pkg/internal/codegen" - "sigs.k8s.io/controller-tools/pkg/internal/codegen/parse" - "sigs.k8s.io/controller-tools/pkg/util" -) - -// Generator generates CRD manifests from API resource definitions defined in Go source files. -type Generator struct { - RootPath string - OutputDir string - Domain string - Namespace string - SkipMapValidation bool - - // OutFs is filesystem to be used for writing out the result - OutFs afero.Fs - - // apisPkg is the absolute Go pkg name for current project's 'pkg/apis' pkg. - // This is needed to determine if a Type belongs to the project or it is a referred Type. - apisPkg string - - // APIsPath and APIsPkg allow customized generation for Go types existing under directories other than pkg/apis - APIsPath string - APIsPkg string -} - -// ValidateAndInitFields validate and init generator fields. 
-func (c *Generator) ValidateAndInitFields() error { - var err error - - if c.OutFs == nil { - c.OutFs = afero.NewOsFs() - } - - if len(c.RootPath) == 0 { - // Take current path as root path if not specified. - c.RootPath, err = os.Getwd() - if err != nil { - return err - } - } - - // Validate root path is under go src path - if !crdutil.IsUnderGoSrcPath(c.RootPath) { - return fmt.Errorf("command must be run from path under $GOPATH/src/") - } - - // If Domain is not explicitly specified, - // try to search for PROJECT file as a basis. - if len(c.Domain) == 0 { - if !crdutil.PathHasProjectFile(c.RootPath) { - return fmt.Errorf("PROJECT file missing in dir %s", c.RootPath) - } - c.Domain = crdutil.GetDomainFromProject(c.RootPath) - } - - err = c.setAPIsPkg() - if err != nil { - return err - } - - // Init output directory - if c.OutputDir == "" { - c.OutputDir = path.Join(c.RootPath, "config/crds") - } - - return nil -} - -// Do manages CRD generation. -func (c *Generator) Do() error { - arguments := args.Default() - b, err := arguments.NewBuilder() - if err != nil { - return fmt.Errorf("failed making a parser: %v", err) - } - - // Switch working directory to root path. - if err := os.Chdir(c.RootPath); err != nil { - return fmt.Errorf("failed switching working dir: %v", err) - } - - if err := b.AddDirRecursive("./" + c.APIsPath); err != nil { - return fmt.Errorf("failed making a parser: %v", err) - } - ctx, err := parse.NewContext(b) - if err != nil { - return fmt.Errorf("failed making a context: %v", err) - } - - arguments.CustomArgs = &parse.Options{SkipMapValidation: c.SkipMapValidation} - - // TODO: find an elegant way to fulfill the domain in APIs. - p := parse.NewAPIs(ctx, arguments, c.Domain, c.apisPkg) - crds := c.getCrds(p) - - return c.writeCRDs(crds) -} - -func (c *Generator) writeCRDs(crds map[string][]byte) error { - // Ensure output dir exists. - if err := c.OutFs.MkdirAll(c.OutputDir, os.FileMode(0700)); err != nil { - return err - } - - for file, crd := range crds { - outFile := path.Join(c.OutputDir, file) - if err := (&util.FileWriter{Fs: c.OutFs}).WriteFile(outFile, crd); err != nil { - return err - } - } - return nil -} - -func getCRDFileName(resource *codegen.APIResource) string { - elems := []string{resource.Group, resource.Version, strings.ToLower(resource.Kind)} - return strings.Join(elems, "_") + ".yaml" -} - -func (c *Generator) getCrds(p *parse.APIs) map[string][]byte { - crds := map[string]extensionsv1beta1.CustomResourceDefinition{} - for _, g := range p.APIs.Groups { - for _, v := range g.Versions { - for _, r := range v.Resources { - crd := r.CRD - // ignore types which do not belong to this project - if !c.belongsToAPIsPkg(r.Type) { - continue - } - if len(c.Namespace) > 0 { - crd.Namespace = c.Namespace - } - fileName := getCRDFileName(r) - crds[fileName] = crd - } - } - } - - result := map[string][]byte{} - for file, crd := range crds { - b, err := yaml.Marshal(crd) - if err != nil { - log.Fatalf("Error: %v", err) - } - result[file] = b - } - - return result -} - -// belongsToAPIsPkg returns true if type t is defined under pkg/apis pkg of -// current project. 
-func (c *Generator) belongsToAPIsPkg(t *types.Type) bool { - return strings.HasPrefix(t.Name.Package, c.apisPkg) -} - -func (c *Generator) setAPIsPkg() error { - var err error - if c.APIsPath == "" { - c.APIsPath = "pkg/apis" - } - - c.apisPkg = c.APIsPkg - if c.apisPkg == "" { - // Validate apis directory exists under working path - apisPath := path.Join(c.RootPath, c.APIsPath) - if _, err := os.Stat(apisPath); err != nil { - return fmt.Errorf("error validating apis path %s: %v", apisPath, err) - } - - c.apisPkg, err = crdutil.DirToGoPkg(apisPath) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go new file mode 100644 index 0000000000..4a5bca923b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package crd + +import ( + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// KnownPackages overrides types in some comment packages that have custom validation +// but don't have validation markers on them (since they're from core Kubernetes). +var KnownPackages = map[string]PackageOverride{ + + "k8s.io/apimachinery/pkg/apis/meta/v1": func(p *Parser, pkg *loader.Package) { + // ObjectMeta is managed by the Kubernetes API server, so no need to + // generate validation for it. + p.Schemata[TypeIdent{Name: "ObjectMeta", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "object", + } + p.Schemata[TypeIdent{Name: "Time", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "string", + Format: "date-time", + } + p.Schemata[TypeIdent{Name: "MicroTime", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "string", + Format: "date-time", + } + p.Schemata[TypeIdent{Name: "Duration", Package: pkg}] = apiext.JSONSchemaProps{ + // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) + Type: "string", + } + p.Schemata[TypeIdent{Name: "Fields", Package: pkg}] = apiext.JSONSchemaProps{ + // this is a recursive structure that can't be flattened or, for that matter, properly generated. 
+ // so just treat it as an arbitrary map + Type: "object", + AdditionalProperties: &apiext.JSONSchemaPropsOrBool{Allows: true}, + } + p.AddPackage(pkg) // get the rest of the types + }, + + "k8s.io/apimachinery/pkg/api/resource": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "Quantity", Package: pkg}] = apiext.JSONSchemaProps{ + // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) + Type: "string", + } + // No point in calling AddPackage, this is the sole inhabitant + }, + + "k8s.io/apimachinery/pkg/runtime": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "RawExtension", Package: pkg}] = apiext.JSONSchemaProps{ + // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) + Type: "object", + } + p.AddPackage(pkg) // get the rest of the types + }, + + "k8s.io/apimachinery/pkg/util/intstr": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "IntOrString", Package: pkg}] = apiext.JSONSchemaProps{ + AnyOf: []apiext.JSONSchemaProps{ + {Type: "string"}, + {Type: "integer"}, + }, + } + // No point in calling AddPackage, this is the sole inhabitant + }, +} + +// AddKnownTypes registers the packages overrides in KnownPackages with the given parser. +func AddKnownTypes(parser *Parser) { + // ensure everything is there before adding to PackageOverrides + // TODO(directxman12): this is a bit of a hack, maybe just use constructors? + parser.init() + for pkgName, override := range KnownPackages { + parser.PackageOverrides[pkgName] = override + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go new file mode 100644 index 0000000000..f397e1100f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go @@ -0,0 +1,284 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "fmt" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// CRDMarkers lists all markers that directly modify the CRD (not validation +// schemas). +var CRDMarkers = []*definitionWithHelp{ + // TODO(directxman12): more detailed help + must(markers.MakeDefinition("kubebuilder:subresource:status", markers.DescribesType, SubresourceStatus{})). + WithHelp(SubresourceStatus{}.Help()), + + must(markers.MakeDefinition("kubebuilder:subresource:scale", markers.DescribesType, SubresourceScale{})). + WithHelp(SubresourceScale{}.Help()), + + must(markers.MakeDefinition("kubebuilder:printcolumn", markers.DescribesType, PrintColumn{})). + WithHelp(PrintColumn{}.Help()), + + must(markers.MakeDefinition("kubebuilder:resource", markers.DescribesType, Resource{})). + WithHelp(Resource{}.Help()), + + must(markers.MakeDefinition("kubebuilder:storageversion", markers.DescribesType, StorageVersion{})). 
+ WithHelp(StorageVersion{}.Help()), +} + +// TODO: categories and singular used to be annotations types +// TODO: doc + +func init() { + AllDefinitions = append(AllDefinitions, CRDMarkers...) +} + +// +controllertools:marker:generateHelp:category=CRD + +// SubresourceStatus enables the "/status" subresource on a CRD. +type SubresourceStatus struct{} + +func (s SubresourceStatus) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + var subresources *apiext.CustomResourceSubresources + if version == "" { + // single-version + if crd.Subresources == nil { + crd.Subresources = &apiext.CustomResourceSubresources{} + } + subresources = crd.Subresources + } else { + // multi-version + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + if ver.Subresources == nil { + ver.Subresources = &apiext.CustomResourceSubresources{} + } + subresources = ver.Subresources + break + } + } + if subresources == nil { + return fmt.Errorf("status subresource applied to version %q not in CRD", version) + } + subresources.Status = &apiext.CustomResourceSubresourceStatus{} + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// SubresourceScale enables the "/scale" subresource on a CRD. +type SubresourceScale struct { + // marker names are leftover legacy cruft + + // SpecPath specifies the jsonpath to the replicas field for the scale's spec. + SpecPath string `marker:"specpath"` + + // StatusPath specifies the jsonpath to the replicas field for the scale's status. + StatusPath string `marker:"statuspath"` + + // SelectorPath specifies the jsonpath to the pod label selector field for the scale's status. + // + // The selector field must be the *string* form (serialized form) of a selector. + // Setting a pod label selector is necessary for your type to work with the HorizontalPodAutoscaler. + SelectorPath *string `marker:"selectorpath"` +} + +func (s SubresourceScale) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + var subresources *apiext.CustomResourceSubresources + if version == "" { + // single-version + if crd.Subresources == nil { + crd.Subresources = &apiext.CustomResourceSubresources{} + } + subresources = crd.Subresources + } else { + // multi-version + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + if ver.Subresources == nil { + ver.Subresources = &apiext.CustomResourceSubresources{} + } + subresources = ver.Subresources + break + } + } + if subresources == nil { + return fmt.Errorf("scale subresource applied to version %q not in CRD", version) + } + subresources.Scale = &apiext.CustomResourceSubresourceScale{ + SpecReplicasPath: s.SpecPath, + StatusReplicasPath: s.StatusPath, + LabelSelectorPath: s.SelectorPath, + } + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// StorageVersion marks this version as the "storage version" for the CRD for conversion. +// +// When conversion is enabled for a CRD (i.e. it's not a trivial-versions/single-version CRD), +// one version is set as the "storage version" to be stored in etcd. Attempting to store any +// other version will result in conversion to the storage version via a conversion webhook. 
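As a usage sketch, the subresource markers above are written as comment markers directly on an API type; the package, type, fields, and JSONPaths below are illustrative and not part of this patch:

package v1alpha2 // an assumed API package

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +kubebuilder:subresource:status
// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector
type ExampleApp struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ExampleAppSpec   `json:"spec,omitempty"`
	Status ExampleAppStatus `json:"status,omitempty"`
}

type ExampleAppSpec struct {
	Replicas *int32 `json:"replicas,omitempty"`
}

type ExampleAppStatus struct {
	// Replicas and Selector back the /scale subresource paths referenced above.
	Replicas int32  `json:"replicas"`
	Selector string `json:"selector"`
}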
+type StorageVersion struct{} + +func (s StorageVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + if version == "" { + // single-version, do nothing + return nil + } + // multi-version + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + ver.Storage = true + break + } + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// PrintColumn adds a column to "kubectl get" output for this CRD. +type PrintColumn struct { + // Name specifies the name of the column. + Name string + + // Type indicates the type of the column. + // + // It may be any OpenAPI data type listed at + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types. + Type string + + // JSONPath specifies the jsonpath expression used to extract the value of the column. + JSONPath string `marker:"JSONPath"` // legacy cruft + + // Description specifies the help/description for this column. + Description string `marker:",optional"` + + // Format specifies the format of the column. + // + // It may be any OpenAPI data format corresponding to the type, listed at + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types. + Format string `marker:",optional"` + + // Priority indicates how important it is that this column be displayed. + // + // Lower priority (*higher* numbered) columns will be hidden if the terminal + // width is too small. + Priority int32 `marker:",optional"` +} + +func (s PrintColumn) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + var columns *[]apiext.CustomResourceColumnDefinition + if version == "" { + columns = &crd.AdditionalPrinterColumns + } else { + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + if ver.Subresources == nil { + ver.Subresources = &apiext.CustomResourceSubresources{} + } + columns = &ver.AdditionalPrinterColumns + break + } + } + if columns == nil { + return fmt.Errorf("printer columns applied to version %q not in CRD", version) + } + + *columns = append(*columns, apiext.CustomResourceColumnDefinition{ + Name: s.Name, + Type: s.Type, + JSONPath: s.JSONPath, + Description: s.Description, + Format: s.Format, + Priority: s.Priority, + }) + + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// Resource configures naming and scope for a CRD. +type Resource struct { + // Path specifies the plural "resource" for this CRD. + // + // It generally corresponds to a plural, lower-cased version of the Kind. + // See https://book.kubebuilder.io/cronjob-tutorial/gvks.html. + Path string `marker:",optional"` + + // ShortName specifies aliases for this CRD. + // + // Short names are often used when people have work with your resource + // over and over again. For instance, "rs" for "replicaset" or + // "crd" for customresourcedefinition. + ShortName []string `marker:",optional"` + + // Categories specifies which group aliases this resource is part of. + // + // Group aliases are used to work with groups of resources at once. + // The most common one is "all" which covers about a third of the base + // resources in Kubernetes, and is generally used for "user-facing" resources. + Categories []string `marker:",optional"` + + // Singular overrides the singular form of your resource. + // + // The singular form is otherwise defaulted off the plural (path). 
+ Singular string `marker:",optional"` + + // Scope overrides the scope of the CRD (cluster vs namespaced). + // + // Scope defaults to "namespaced". Cluster-scoped ("cluster") resources + // don't exist in namespaces. + Scope string `marker:",optional"` +} + +func (s Resource) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + if s.Path != "" { + crd.Names.Plural = s.Path + } + crd.Names.ShortNames = s.ShortName + crd.Names.Categories = s.Categories + + switch s.Scope { + case "": + crd.Scope = apiext.NamespaceScoped + default: + crd.Scope = apiext.ResourceScope(s.Scope) + } + + return nil +} + +// NB(directxman12): singular was historically distinct, so we keep it here for backwards compat diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go new file mode 100644 index 0000000000..bfe68967e1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package markers defines markers for generating schema valiation +// and CRD structure. +// +// All markers related to CRD generation live in AllDefinitions. +// +// Validation Markers +// +// Validation markers have values that imprlement ApplyToSchema +// (crd.SchemaMarker). Any marker implementing this will automatically +// be run after the rest of a given schema node has been generated. +// Markers that need to be run before any other markers can also +// implement ApplyFirst, but this is discouraged and may change +// in the future. +// +// All validation markers start with "+kubebuilder:validation", and +// have the same name as their type name. +// +// CRD Markers +// +// Markers that modify anything in the CRD itself *except* for the schema +// implement ApplyToCRD (crd.CRDMarker). They are expected to detect whether +// they should apply themselves to a specific version in the CRD (as passed to +// them), or to the root-level CRD for legacy cases. They are applied *after* +// the rest of the CRD is computed. +// +// Misc +// +// This package also defines the "+groupName" and "+versionName" package-level +// markers, for defining package<->group-version mappings. +package markers diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go new file mode 100644 index 0000000000..ec62c342b1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func init() { + AllDefinitions = append(AllDefinitions, + must(markers.MakeDefinition("groupName", markers.DescribesPackage, "")). + WithHelp(markers.SimpleHelp("CRD", "specifies the API group name for this package.")), + + must(markers.MakeDefinition("versionName", markers.DescribesPackage, "")). + WithHelp(markers.SimpleHelp("CRD", "overrides the API group version for this package (defaults to the package name).")), + + must(markers.MakeDefinition("kubebuilder:validation:Optional", markers.DescribesPackage, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that all fields in this package are optional by default.")), + + must(markers.MakeDefinition("kubebuilder:validation:Required", markers.DescribesPackage, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that all fields in this package are required by default.")), + ) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go new file mode 100644 index 0000000000..0e7c426942 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "reflect" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +type definitionWithHelp struct { + *markers.Definition + Help *markers.DefinitionHelp +} + +func (d *definitionWithHelp) WithHelp(help *markers.DefinitionHelp) *definitionWithHelp { + d.Help = help + return d +} + +func (d *definitionWithHelp) Register(reg *markers.Registry) error { + if err := reg.Register(d.Definition); err != nil { + return err + } + if d.Help != nil { + reg.AddHelp(d.Definition, d.Help) + } + return nil +} + +func must(def *markers.Definition, err error) *definitionWithHelp { + return &definitionWithHelp{ + Definition: markers.Must(def, err), + } +} + +// AllDefinitions contains all marker definitions for this package. +var AllDefinitions []*definitionWithHelp + +type hasHelp interface { + Help() *markers.DefinitionHelp +} + +// mustMakeAllWithPrefix converts each object into a marker definition using +// the object's type's with the prefix to form the marker name. 
+func mustMakeAllWithPrefix(prefix string, target markers.TargetType, objs ...interface{}) []*definitionWithHelp { + defs := make([]*definitionWithHelp, len(objs)) + for i, obj := range objs { + name := prefix + ":" + reflect.TypeOf(obj).Name() + def, err := markers.MakeDefinition(name, target, obj) + if err != nil { + panic(err) + } + defs[i] = &definitionWithHelp{Definition: def, Help: obj.(hasHelp).Help()} + } + + return defs +} + +// Register registers all definitions for CRD generation to the given registry. +func Register(reg *markers.Registry) error { + for _, def := range AllDefinitions { + if err := def.Register(reg); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go new file mode 100644 index 0000000000..9f7fbca722 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go @@ -0,0 +1,286 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "fmt" + + "encoding/json" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// ValidationMarkers lists all available markers that affect CRD schema generation, +// except for the few that don't make sense as type-level markers (see FieldOnlyMarkers). +// All markers start with `+kubebuilder:validation:`, and continue with their type name. +// A copy is produced of all markers that describes types as well, for making types +// reusable and writing complex validations on slice items. +var ValidationMarkers = mustMakeAllWithPrefix("kubebuilder:validation", markers.DescribesField, + + // integer markers + + Maximum(0), + Minimum(0), + ExclusiveMaximum(false), + ExclusiveMinimum(false), + MultipleOf(0), + + // string markers + + MaxLength(0), + MinLength(0), + Pattern(""), + + // slice markers + + MaxItems(0), + MinItems(0), + UniqueItems(false), + + // general markers + + Enum(nil), + Format(""), + Type(""), +) + +// FieldOnlyMarkers list field-specific validation markers (i.e. those markers that don't make +// sense on a type, and thus aren't in ValidationMarkers). +var FieldOnlyMarkers = []*definitionWithHelp{ + must(markers.MakeDefinition("kubebuilder:validation:Required", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is required, if fields are optional by default.")), + must(markers.MakeDefinition("kubebuilder:validation:Optional", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is optional, if fields are required by default.")), + must(markers.MakeDefinition("optional", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is optional, if fields are required by default.")), + + must(markers.MakeDefinition("nullable", markers.DescribesField, Nullable{})). 
+ WithHelp(Nullable{}.Help()), +} + +func init() { + AllDefinitions = append(AllDefinitions, ValidationMarkers...) + + for _, def := range ValidationMarkers { + newDef := *def.Definition + // copy both parts so we don't change the definition + typDef := definitionWithHelp{ + Definition: &newDef, + Help: def.Help, + } + typDef.Target = markers.DescribesType + AllDefinitions = append(AllDefinitions, &typDef) + } + + AllDefinitions = append(AllDefinitions, FieldOnlyMarkers...) +} + +// +controllertools:marker:generateHelp:category="CRD validation" +// Maximum specifies the maximum numeric value that this field can have. +type Maximum int + +// +controllertools:marker:generateHelp:category="CRD validation" +// Minimum specifies the minimum numeric value that this field can have. +type Minimum int + +// +controllertools:marker:generateHelp:category="CRD validation" +// ExclusiveMinimum indicates that the minimum is "up to" but not including that value. +type ExclusiveMinimum bool + +// +controllertools:marker:generateHelp:category="CRD validation" +// ExclusiveMaximum indicates that the maximum is "up to" but not including that value. +type ExclusiveMaximum bool + +// +controllertools:marker:generateHelp:category="CRD validation" +// MultipleOf specifies that this field must have a numeric value that's a multiple of this one. +type MultipleOf int + +// +controllertools:marker:generateHelp:category="CRD validation" +// MaxLength specifies the maximum length for this string. +type MaxLength int + +// +controllertools:marker:generateHelp:category="CRD validation" +// MinLength specifies the minimum length for this string. +type MinLength int + +// +controllertools:marker:generateHelp:category="CRD validation" +// Pattern specifies that this string must match the given regular expression. +type Pattern string + +// +controllertools:marker:generateHelp:category="CRD validation" +// MaxItems specifies the maximum length for this list. +type MaxItems int + +// +controllertools:marker:generateHelp:category="CRD validation" +// MinItems specifies the minimun length for this list. +type MinItems int + +// +controllertools:marker:generateHelp:category="CRD validation" +// UniqueItems specifies that all items in this list must be unique. +type UniqueItems bool + +// +controllertools:marker:generateHelp:category="CRD validation" +// Enum specifies that this (scalar) field is restricted to the *exact* values specified here. +type Enum []interface{} + +// +controllertools:marker:generateHelp:category="CRD validation" +// Format specifies additional "complex" formatting for this field. +// +// For example, a date-time field would be marked as "type: string" and +// "format: date-time". +type Format string + +// +controllertools:marker:generateHelp:category="CRD validation" +// Type overrides the type for this field (which defaults to the equivalent of the Go type). +// +// This generally must be paired with custom serialization. For example, the +// metav1.Time field would be marked as "type: string" and "format: date-time". +type Type string + +// +controllertools:marker:generateHelp:category="CRD validation" +// Nullable marks this field as allowing the "null" value. +// +// This is often not necessary, but may be helpful with custom serialization. 
+type Nullable struct{} + +func (m Maximum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "integer" { + return fmt.Errorf("must apply maximum to an integer") + } + val := float64(m) + schema.Maximum = &val + return nil +} +func (m Minimum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "integer" { + return fmt.Errorf("must apply minimum to an integer") + } + val := float64(m) + schema.Minimum = &val + return nil +} +func (m ExclusiveMaximum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "integer" { + return fmt.Errorf("must apply exclusivemaximum to an integer") + } + schema.ExclusiveMaximum = bool(m) + return nil +} +func (m ExclusiveMinimum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "integer" { + return fmt.Errorf("must apply exclusiveminimum to an integer") + } + schema.ExclusiveMinimum = bool(m) + return nil +} +func (m MultipleOf) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "integer" { + return fmt.Errorf("must apply multipleof to an integer") + } + val := float64(m) + schema.MultipleOf = &val + return nil +} + +func (m MaxLength) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "string" { + return fmt.Errorf("must apply maxlength to a string") + } + val := int64(m) + schema.MaxLength = &val + return nil +} +func (m MinLength) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "string" { + return fmt.Errorf("must apply minlength to a string") + } + val := int64(m) + schema.MinLength = &val + return nil +} +func (m Pattern) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "string" { + return fmt.Errorf("must apply pattern to a string") + } + schema.Pattern = string(m) + return nil +} + +func (m MaxItems) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "array" { + return fmt.Errorf("must apply maxitem to an array") + } + val := int64(m) + schema.MaxItems = &val + return nil +} +func (m MinItems) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "array" { + return fmt.Errorf("must apply minitems to an array") + } + val := int64(m) + schema.MinItems = &val + return nil +} +func (m UniqueItems) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + if schema.Type != "array" { + return fmt.Errorf("must apply uniqueitems to an array") + } + schema.UniqueItems = bool(m) + return nil +} + +func (m Enum) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + // TODO(directxman12): this is a bit hacky -- we should + // probably support AnyType better + using the schema structure + vals := make([]v1beta1.JSON, len(m)) + for i, val := range m { + // TODO(directxman12): check actual type with schema type? + // if we're expecting a string, marshal the string properly... + // NB(directxman12): we use json.Marshal to ensure we handle JSON escaping properly + valMarshalled, err := json.Marshal(val) + if err != nil { + return err + } + vals[i] = v1beta1.JSON{Raw: valMarshalled} + } + schema.Enum = vals + return nil +} +func (m Format) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + schema.Format = string(m) + return nil +} + +// NB(directxman12): we "typecheck" on target schema properties here, +// which means the "Type" marker *must* be applied first. +// TODO(directxman12): find a less hacky way to do this +// (we could preserve ordering of markers, but that feels bad in its own right). 
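As a usage sketch, the field-targeted forms of these validation markers are written directly above struct fields; the spec type and field names below are illustrative only:

type ExampleSpec struct {
	// +kubebuilder:validation:MinLength=1
	// +kubebuilder:validation:MaxLength=63
	// +kubebuilder:validation:Pattern=^[a-z0-9-]+$
	Name string `json:"name"`

	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	Port int32 `json:"port"`

	// +kubebuilder:validation:Enum=Allow;Forbid
	Policy string `json:"policy,omitempty"`

	// +kubebuilder:validation:MinItems=1
	// +kubebuilder:validation:UniqueItems=true
	Zones []string `json:"zones,omitempty"`
}

Because the init() above also registers a type-level copy of each validation marker, the same markers may be placed on a named type (for example a string alias) to build a reusable validated type.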
+ +func (m Type) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + schema.Type = string(m) + return nil +} + +func (m Type) ApplyFirst() {} + +func (m Nullable) ApplyToSchema(schema *v1beta1.JSONSchemaProps) error { + schema.Nullable = true + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go new file mode 100644 index 0000000000..9fbac47846 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go @@ -0,0 +1,304 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package markers + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Enum) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies that this (scalar) field is restricted to the *exact* values specified here.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (ExclusiveMaximum) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "indicates that the maximum is \"up to\" but not including that value.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (ExclusiveMinimum) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "indicates that the minimum is \"up to\" but not including that value.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (Format) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies additional \"complex\" formatting for this field. 
", + Details: " For example, a date-time field would be marked as \"type: string\" and \"format: date-time\".", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (MaxItems) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies the maximum length for this list.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (MaxLength) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies the maximum length for this string.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (Maximum) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies the maximum numeric value that this field can have.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (MinItems) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies the minimun length for this list.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (MinLength) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies the minimum length for this string.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (Minimum) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies the minimum numeric value that this field can have.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (MultipleOf) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies that this field must have a numeric value that's a multiple of this one.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (Nullable) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "marks this field as allowing the \"null\" value. ", + Details: " This is often not necessary, but may be helpful with custom serialization.", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (Pattern) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies that this string must match the given regular expression.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (PrintColumn) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "adds a column to \"kubectl get\" output for this CRD.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Name": markers.DetailedHelp{ + Summary: "specifies the name of the column.", + Details: "", + }, + "Type": markers.DetailedHelp{ + Summary: "indicates the type of the column. 
", + Details: " It may be any OpenAPI data type listed at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", + }, + "JSONPath": markers.DetailedHelp{ + Summary: "specifies the jsonpath expression used to extract the value of the column.", + Details: "", + }, + "Description": markers.DetailedHelp{ + Summary: "specifies the help/description for this column.", + Details: "", + }, + "Format": markers.DetailedHelp{ + Summary: "specifies the format of the column. ", + Details: " It may be any OpenAPI data format corresponding to the type, listed at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", + }, + "Priority": markers.DetailedHelp{ + Summary: "indicates how important it is that this column be displayed. ", + Details: " Lower priority (*higher* numbered) columns will be hidden if the terminal width is too small.", + }, + }, + } +} + +func (Resource) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "configures naming and scope for a CRD.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Path": markers.DetailedHelp{ + Summary: "specifies the plural \"resource\" for this CRD. ", + Details: " It generally corresponds to a plural, lower-cased version of the Kind. See https://book.kubebuilder.io/cronjob-tutorial/gvks.html.", + }, + "ShortName": markers.DetailedHelp{ + Summary: "specifies aliases for this CRD. ", + Details: " Short names are often used when people have work with your resource over and over again. For instance, \"rs\" for \"replicaset\" or \"crd\" for customresourcedefinition.", + }, + "Categories": markers.DetailedHelp{ + Summary: "specifies which group aliases this resource is part of. ", + Details: " Group aliases are used to work with groups of resources at once. The most common one is \"all\" which covers about a third of the base resources in Kubernetes, and is generally used for \"user-facing\" resources.", + }, + "Singular": markers.DetailedHelp{ + Summary: "overrides the singular form of your resource. ", + Details: " The singular form is otherwise defaulted off the plural (path).", + }, + "Scope": markers.DetailedHelp{ + Summary: "overrides the scope of the CRD (cluster vs namespaced). ", + Details: " Scope defaults to \"namespaced\". Cluster-scoped (\"cluster\") resources don't exist in namespaces.", + }, + }, + } +} + +func (StorageVersion) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "marks this version as the \"storage version\" for the CRD for conversion. ", + Details: " When conversion is enabled for a CRD (i.e. it's not a trivial-versions/single-version CRD), one version is set as the \"storage version\" to be stored in etcd. 
Attempting to store any other version will result in conversion to the storage version via a conversion webhook.", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (SubresourceScale) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "enables the \"/scale\" subresource on a CRD.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "SpecPath": markers.DetailedHelp{ + Summary: "specifies the jsonpath to the replicas field for the scale's spec.", + Details: "", + }, + "StatusPath": markers.DetailedHelp{ + Summary: "specifies the jsonpath to the replicas field for the scale's status.", + Details: "", + }, + "SelectorPath": markers.DetailedHelp{ + Summary: "specifies the jsonpath to the pod label selector field for the scale's status. ", + Details: " The selector field must be the *string* form (serialized form) of a selector. Setting a pod label selector is necessary for your type to work with the HorizontalPodAutoscaler.", + }, + }, + } +} + +func (SubresourceStatus) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "enables the \"/status\" subresource on a CRD.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (Type) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "overrides the type for this field (which defaults to the equivalent of the Go type). ", + Details: " This generally must be paired with custom serialization. For example, the metav1.Time field would be marked as \"type: string\" and \"format: date-time\".", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (UniqueItems) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies that all items in this list must be unique.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go new file mode 100644 index 0000000000..c2fe071260 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go @@ -0,0 +1,219 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "go/ast" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// TypeIdent represents some type in a Package. 
+type TypeIdent struct { + Package *loader.Package + Name string +} + +func (t TypeIdent) String() string { + return fmt.Sprintf("%q.%s", t.Package.ID, t.Name) +} + +// PackageOverride overrides the loading of some package +// (potentially setting custom schemata, etc). It must +// call AddPackage if it wants to continue with the default +// loading behavior. +type PackageOverride func(p *Parser, pkg *loader.Package) + +// Parser knows how to parse out CRD information and generate +// OpenAPI schemata from some collection of types and markers. +// Most methods on Parser cache their results automatically, +// and thus may be called any number of times. +type Parser struct { + Collector *markers.Collector + + // Types contains the known TypeInfo for this parser. + Types map[TypeIdent]*markers.TypeInfo + // Schemata contains the known OpenAPI JSONSchemata for this parser. + Schemata map[TypeIdent]apiext.JSONSchemaProps + // GroupVersions contains the known group-versions of each package in this parser. + GroupVersions map[*loader.Package]schema.GroupVersion + // CustomResourceDefinitions contains the known CustomResourceDefinitions for types in this parser. + CustomResourceDefinitions map[schema.GroupKind]apiext.CustomResourceDefinition + + // PackageOverrides indicates that the loading of any package with + // the given path should be handled by the given overrider. + PackageOverrides map[string]PackageOverride + + // checker stores persistent partial type-checking/reference-traversal information. + Checker *loader.TypeChecker + // packages marks packages as loaded, to avoid re-loading them. + packages map[*loader.Package]struct{} + + flattener *Flattener +} + +func (p *Parser) init() { + if p.packages == nil { + p.packages = make(map[*loader.Package]struct{}) + } + if p.flattener == nil { + p.flattener = &Flattener{ + Parser: p, + } + } + if p.Schemata == nil { + p.Schemata = make(map[TypeIdent]apiext.JSONSchemaProps) + } + if p.Types == nil { + p.Types = make(map[TypeIdent]*markers.TypeInfo) + } + if p.PackageOverrides == nil { + p.PackageOverrides = make(map[string]PackageOverride) + } + if p.GroupVersions == nil { + p.GroupVersions = make(map[*loader.Package]schema.GroupVersion) + } + if p.CustomResourceDefinitions == nil { + p.CustomResourceDefinitions = make(map[schema.GroupKind]apiext.CustomResourceDefinition) + } +} + +// indexTypes loads all types in the package into Types. +func (p *Parser) indexTypes(pkg *loader.Package) { + // autodetect + pkgMarkers, err := markers.PackageMarkers(p.Collector, pkg) + if err != nil { + pkg.AddError(err) + } else if nameVal := pkgMarkers.Get("groupName"); nameVal != nil { + versionVal := pkg.Name // a reasonable guess + if versionMarker := pkgMarkers.Get("versionName"); versionMarker != nil { + versionVal = versionMarker.(string) + } + + p.GroupVersions[pkg] = schema.GroupVersion{ + Version: versionVal, + Group: nameVal.(string), + } + } + + if err := markers.EachType(p.Collector, pkg, func(info *markers.TypeInfo) { + ident := TypeIdent{ + Package: pkg, + Name: info.Name, + } + + p.Types[ident] = info + }); err != nil { + pkg.AddError(err) + } +} + +// LookupType fetches type info from Types. +func (p *Parser) LookupType(pkg *loader.Package, name string) *markers.TypeInfo { + return p.Types[TypeIdent{Package: pkg, Name: name}] +} + +// NeedSchemaFor indicates that a schema should be generated for the given type. 
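A condensed sketch of how a caller drives Parser, mirroring Generator.Generate in gen.go earlier in this patch; ctx (a genall.GenerationContext) and gk (a schema.GroupKind as discovered by findKubeKinds) are assumed inputs, and the genall and apiext imports used in gen.go are assumed here as well:

func exampleCRDFor(ctx *genall.GenerationContext, gk schema.GroupKind) apiext.CustomResourceDefinition {
	parser := &Parser{
		Collector: ctx.Collector,
		Checker:   ctx.Checker,
	}
	// Register the built-in schemata (metav1.ObjectMeta, resource.Quantity, ...).
	AddKnownTypes(parser)
	// Load and index every root package (and, as schemata are requested,
	// the packages they reference).
	for _, root := range ctx.Roots {
		parser.NeedPackage(root)
	}
	// Build the CRD; nil matches Generator.Generate when MaxDescLen is unset.
	parser.NeedCRDFor(gk, nil)
	return parser.CustomResourceDefinitions[gk]
}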
+func (p *Parser) NeedSchemaFor(typ TypeIdent) { + p.init() + + p.NeedPackage(typ.Package) + if _, knownSchema := p.Schemata[typ]; knownSchema { + return + } + + info, knownInfo := p.Types[typ] + if !knownInfo { + typ.Package.AddError(fmt.Errorf("unknown type %s", typ)) + return + } + + // avoid tripping recursive schemata, like ManagedFields, by adding an empty WIP schema + p.Schemata[typ] = apiext.JSONSchemaProps{} + + schemaCtx := newSchemaContext(typ.Package, p) + ctxForInfo := schemaCtx.ForInfo(info) + + pkgMarkers, err := markers.PackageMarkers(p.Collector, typ.Package) + if err != nil { + typ.Package.AddError(err) + } + ctxForInfo.PackageMarkers = pkgMarkers + + schema := infoToSchema(ctxForInfo) + + p.Schemata[typ] = *schema + + return +} + +// NeedCRDFor lives off in spec.go + +// AddPackage indicates that types and type-checking information is needed +// for the the given package, *ignoring* overrides. +// Generally, consumers should call NeedPackage, while PackageOverrides should +// call AddPackage to continue with the normal loading procedure. +func (p *Parser) AddPackage(pkg *loader.Package) { + p.init() + if _, checked := p.packages[pkg]; checked { + return + } + p.indexTypes(pkg) + p.Checker.Check(pkg, filterTypesForCRDs) + p.packages[pkg] = struct{}{} +} + +// NeedPackage indicates that types and type-checking information +// is needed for the given package. +func (p *Parser) NeedPackage(pkg *loader.Package) { + p.init() + if _, checked := p.packages[pkg]; checked { + return + } + // overrides are going to be written without vendor. This is why we index by the actual + // object when we can. + if override, overridden := p.PackageOverrides[loader.NonVendorPath(pkg.PkgPath)]; overridden { + override(p, pkg) + p.packages[pkg] = struct{}{} + return + } + p.AddPackage(pkg) +} + +// filterTypesForCRDs filters out all nodes that aren't used in CRD generation, +// like interfaces and struct fields without JSON tag. +func filterTypesForCRDs(node ast.Node) bool { + switch node := node.(type) { + case *ast.InterfaceType: + // skip interfaces, we never care about references in them + return false + case *ast.StructType: + return true + case *ast.Field: + _, hasTag := loader.ParseAstTag(node.Tag).Lookup("json") + // fields without JSON tags mean we have custom serialization, + // so only visit fields with tags. + return hasTag + default: + return true + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go new file mode 100644 index 0000000000..ab969a183f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go @@ -0,0 +1,419 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "go/ast" + "go/types" + "strings" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// Schema flattening is done in a recursive mapping method. 
+// Start reading at infoToSchema.
+
+const (
+	// defPrefix is the prefix used to link to definitions in the OpenAPI schema.
+	defPrefix = "#/definitions/"
+)
+
+var (
+	// byteType is the types.Type for byte (see the types documentation
+	// for why we need to look this up in the Universe), saved
+	// for quick comparison.
+	byteType = types.Universe.Lookup("byte").Type()
+)
+
+// SchemaMarker is any marker that needs to modify the schema of the underlying type or field.
+type SchemaMarker interface {
+	// ApplyToSchema is called after the rest of the schema for a given type
+	// or field is generated, to modify the schema appropriately.
+	ApplyToSchema(*v1beta1.JSONSchemaProps) error
+}
+
+// applyFirstMarker is applied before any other markers. It's a bit of a hack.
+type applyFirstMarker interface {
+	ApplyFirst()
+}
+
+// schemaRequester knows how to mark that another schema (e.g. via an external reference) is necessary.
+type schemaRequester interface {
+	NeedSchemaFor(typ TypeIdent)
+}
+
+// schemaContext stores and provides information across a hierarchy of schema generation.
+type schemaContext struct {
+	pkg  *loader.Package
+	info *markers.TypeInfo
+
+	schemaRequester schemaRequester
+	PackageMarkers  markers.MarkerValues
+}
+
+// newSchemaContext constructs a new schemaContext for the given package and schema requester.
+// It must have type info added before use via ForInfo.
+func newSchemaContext(pkg *loader.Package, req schemaRequester) *schemaContext {
+	pkg.NeedTypesInfo()
+	return &schemaContext{
+		pkg:             pkg,
+		schemaRequester: req,
+	}
+}
+
+// ForInfo produces a new schemaContext containing the same information
+// as this one, except with the given type information.
+func (c *schemaContext) ForInfo(info *markers.TypeInfo) *schemaContext {
+	return &schemaContext{
+		pkg:             c.pkg,
+		info:            info,
+		schemaRequester: c.schemaRequester,
+	}
+}
+
+// requestSchema asks for the schema for a type in the package with the
+// given import path.
+func (c *schemaContext) requestSchema(pkgPath, typeName string) {
+	pkg := c.pkg
+	if pkgPath != "" {
+		pkg = c.pkg.Imports()[pkgPath]
+	}
+	c.schemaRequester.NeedSchemaFor(TypeIdent{
+		Package: pkg,
+		Name:    typeName,
+	})
+}
+
+// infoToSchema creates a schema for the type in the given set of type information.
+func infoToSchema(ctx *schemaContext) *v1beta1.JSONSchemaProps {
+	return typeToSchema(ctx, ctx.info.RawSpec.Type)
+}
+
+// applyMarkers applies schema markers to the given schema, respecting "apply first" markers.
+func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *v1beta1.JSONSchemaProps, node ast.Node) {
+	// apply "apply first" markers first...
+	for _, markerValues := range markerSet {
+		for _, markerValue := range markerValues {
+			if _, isApplyFirst := markerValue.(applyFirstMarker); !isApplyFirst {
+				continue
+			}
+
+			schemaMarker, isSchemaMarker := markerValue.(SchemaMarker)
+			if !isSchemaMarker {
+				continue
+			}
+
+			if err := schemaMarker.ApplyToSchema(props); err != nil {
+				ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node))
+			}
+		}
+	}
+
+	// ...then the rest of the markers
+	for _, markerValues := range markerSet {
+		for _, markerValue := range markerValues {
+			if _, isApplyFirst := markerValue.(applyFirstMarker); isApplyFirst {
+				// skip apply-first markers, which were already applied
+				continue
+			}
+
+			schemaMarker, isSchemaMarker := markerValue.(SchemaMarker)
+			if !isSchemaMarker {
+				continue
+			}
+			if err := schemaMarker.ApplyToSchema(props); err != nil {
+				ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node))
+			}
+		}
+	}
+}
+
+// typeToSchema creates a schema for the given AST type.
+func typeToSchema(ctx *schemaContext, rawType ast.Expr) *v1beta1.JSONSchemaProps {
+	var props *v1beta1.JSONSchemaProps
+	switch expr := rawType.(type) {
+	case *ast.Ident:
+		props = localNamedToSchema(ctx, expr)
+	case *ast.SelectorExpr:
+		props = namedToSchema(ctx, expr)
+	case *ast.ArrayType:
+		props = arrayToSchema(ctx, expr)
+	case *ast.MapType:
+		props = mapToSchema(ctx, expr)
+	case *ast.StarExpr:
+		props = typeToSchema(ctx, expr.X)
+	case *ast.StructType:
+		props = structToSchema(ctx, expr)
+	default:
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unsupported AST kind %T", expr), rawType))
+		// NB(directxman12): we explicitly don't handle interfaces
+		return &v1beta1.JSONSchemaProps{}
+	}
+
+	props.Description = ctx.info.Doc
+
+	applyMarkers(ctx, ctx.info.Markers, props, rawType)
+
+	return props
+}
+
+// qualifiedName constructs a JSONSchema-safe qualified name for a type
+// (`<typeName>` or `<pkgName>~0<typeName>`, where `<pkgName>`
+// is the package path with `/` replaced by `~1`, according to JSONPointer
+// escapes).
+func qualifiedName(pkgName, typeName string) string {
+	if pkgName != "" {
+		return strings.Replace(pkgName, "/", "~1", -1) + "~0" + typeName
+	}
+	return typeName
+}
+
+// TypeRefLink creates a definition link for the given type and package.
+func TypeRefLink(pkgName, typeName string) string {
+	return defPrefix + qualifiedName(pkgName, typeName)
+}
+
+// localNamedToSchema creates a schema (ref) for a *potentially* local type reference
+// (could be external from a dot-import).
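+// (Reviewer's note, illustrative only: given the JSONPointer escaping above, a
+// reference to a local type "Foo" becomes the ref "#/definitions/Foo", while an
+// external type such as "k8s.io/apimachinery/pkg/api/resource".Quantity becomes
+// "#/definitions/k8s.io~1apimachinery~1pkg~1api~1resource~0Quantity".)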
+func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *v1beta1.JSONSchemaProps { + typeInfo := ctx.pkg.TypesInfo.TypeOf(ident) + if typeInfo == types.Typ[types.Invalid] { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", ident.Name), ident)) + return &v1beta1.JSONSchemaProps{} + } + if basicInfo, isBasic := typeInfo.(*types.Basic); isBasic { + typ, fmt, err := builtinToType(basicInfo) + if err != nil { + ctx.pkg.AddError(loader.ErrFromNode(err, ident)) + } + return &v1beta1.JSONSchemaProps{ + Type: typ, + Format: fmt, + } + } + // NB(directxman12): if there are dot imports, this might be an external reference, + // so use typechecking info to get the actual object + typeNameInfo := typeInfo.(*types.Named).Obj() + pkg := typeNameInfo.Pkg() + pkgPath := loader.NonVendorPath(pkg.Path()) + if pkg == ctx.pkg.Types { + pkgPath = "" + } + ctx.requestSchema(pkgPath, typeNameInfo.Name()) + link := TypeRefLink(pkgPath, typeNameInfo.Name()) + return &v1beta1.JSONSchemaProps{ + Ref: &link, + } +} + +// namedSchema creates a schema (ref) for an explicitly external type reference. +func namedToSchema(ctx *schemaContext, named *ast.SelectorExpr) *v1beta1.JSONSchemaProps { + typeInfoRaw := ctx.pkg.TypesInfo.TypeOf(named) + if typeInfoRaw == types.Typ[types.Invalid] { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %v.%s", named.X, named.Sel.Name), named)) + return &v1beta1.JSONSchemaProps{} + } + typeInfo := typeInfoRaw.(*types.Named) + typeNameInfo := typeInfo.Obj() + nonVendorPath := loader.NonVendorPath(typeNameInfo.Pkg().Path()) + ctx.requestSchema(nonVendorPath, typeNameInfo.Name()) + link := TypeRefLink(nonVendorPath, typeNameInfo.Name()) + return &v1beta1.JSONSchemaProps{ + Ref: &link, + } + // NB(directxman12): we special-case things like resource.Quantity during the "collapse" phase. +} + +// arrayToSchema creates a schema for the items of the given array, dealing appropriately +// with the special `[]byte` type (according to OpenAPI standards). +func arrayToSchema(ctx *schemaContext, array *ast.ArrayType) *v1beta1.JSONSchemaProps { + eltType := ctx.pkg.TypesInfo.TypeOf(array.Elt) + if eltType == byteType && array.Len == nil { + // byte slices are represented as base64-encoded strings + // (the format is defined in OpenAPI v3, but not JSON Schema) + return &v1beta1.JSONSchemaProps{ + Type: "string", + Format: "byte", + } + } + // TODO(directxman12): backwards-compat would require access to markers from base info + items := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), array.Elt) + + return &v1beta1.JSONSchemaProps{ + Type: "array", + Items: &v1beta1.JSONSchemaPropsOrArray{Schema: items}, + } +} + +// mapToSchema creates a schema for items of the given map. Key types must eventually resolve +// to string (other types aren't allowed by JSON, and thus the kubernetes API standards). 
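+// (Reviewer's note, illustrative only: e.g. a Go field of type map[string]int64 is
+// emitted as {"type": "object", "additionalProperties": {"type": "integer", "format": "int64"}};
+// a map keyed by anything that doesn't eventually resolve to a string is reported as an error.)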
+func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *v1beta1.JSONSchemaProps { + keyInfo := ctx.pkg.TypesInfo.TypeOf(mapType.Key) + // check that we've got a type that actually corresponds to a string + for keyInfo != nil { + switch typedKey := keyInfo.(type) { + case *types.Basic: + if typedKey.Info()&types.IsString == 0 { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key)) + return &v1beta1.JSONSchemaProps{} + } + keyInfo = nil // stop iterating + case *types.Named: + keyInfo = typedKey.Underlying() + default: + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key)) + return &v1beta1.JSONSchemaProps{} + } + } + + // TODO(directxman12): backwards-compat would require access to markers from base info + var valSchema *v1beta1.JSONSchemaProps + switch val := mapType.Value.(type) { + case *ast.Ident: + valSchema = localNamedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) + case *ast.SelectorExpr: + valSchema = namedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) + case *ast.ArrayType: + valSchema = arrayToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) + if valSchema.Type == "array" && valSchema.Items.Schema.Type != "string" { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map values must be a named type, not %T", mapType.Value), mapType.Value)) + return &v1beta1.JSONSchemaProps{} + } + default: + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map values must be a named type, not %T", mapType.Value), mapType.Value)) + return &v1beta1.JSONSchemaProps{} + } + + return &v1beta1.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{ + Schema: valSchema, + Allows: true, /* set automatically by serialization, but useful for testing */ + }, + } +} + +// structToSchema creates a schema for the given struct. Embedded fields are placed in AllOf, +// and can be flattened later with a Flattener. +func structToSchema(ctx *schemaContext, structType *ast.StructType) *v1beta1.JSONSchemaProps { + props := &v1beta1.JSONSchemaProps{ + Type: "object", + Properties: make(map[string]v1beta1.JSONSchemaProps), + } + + if ctx.info.RawSpec.Type != structType { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("encountered non-top-level struct (possibly embedded), those aren't allowed"), structType)) + return props + } + + for _, field := range ctx.info.Fields { + jsonTag, hasTag := field.Tag.Lookup("json") + if !hasTag { + // if the field doesn't have a JSON tag, it doesn't belong in output (and shouldn't exist in a serialized type) + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("encountered struct field %q without JSON tag in type %q", field.Name, ctx.info.Name), field.RawField)) + continue + } + jsonOpts := strings.Split(jsonTag, ",") + if len(jsonOpts) == 1 && jsonOpts[0] == "-" { + // skipped fields have the tag "-" (note that "-," means the field is named "-") + continue + } + + inline := false + omitEmpty := false + for _, opt := range jsonOpts[1:] { + switch opt { + case "inline": + inline = true + case "omitempty": + omitEmpty = true + } + } + fieldName := jsonOpts[0] + inline = inline || fieldName == "" // anonymous fields are inline fields in YAML/JSON + + // if no default required mode is set, default to required + defaultMode := "required" + if ctx.PackageMarkers.Get("kubebuilder:validation:Optional") != nil { + defaultMode = "optional" + } + + switch defaultMode { + // if this package isn't set to optional default... 
+ case "required": + // ...everything that's not inline, omitempty, or explicitly optional is required + if !inline && !omitEmpty && field.Markers.Get("kubebuilder:validation:Optional") == nil && field.Markers.Get("optional") == nil { + props.Required = append(props.Required, fieldName) + } + + // if this package isn't set to required default... + case "optional": + // ...everything that isn't explicitly required is optional + if field.Markers.Get("kubebuilder:validation:Required") != nil { + props.Required = append(props.Required, fieldName) + } + } + + propSchema := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), field.RawField.Type) + propSchema.Description = field.Doc + + applyMarkers(ctx, field.Markers, propSchema, field.RawField) + + if inline { + props.AllOf = append(props.AllOf, *propSchema) + continue + } + + props.Properties[fieldName] = *propSchema + } + + return props +} + +// builtinToType converts builtin basic types to their equivalent JSON schema form. +// It *only* handles types allowed by the kubernetes API standards, meaning +// no floats (technically we shouldn't allow int64 or plain int either, but :shrug:). +func builtinToType(basic *types.Basic) (typ string, format string, err error) { + // NB(directxman12): formats from OpenAPI v3 are slightly different than those defined + // in JSONSchema. This'll use the OpenAPI v3 ones, since they're useful for bounding our + // non-string types. + basicInfo := basic.Info() + switch { + case basicInfo&types.IsBoolean != 0: + typ = "boolean" + case basicInfo&types.IsString != 0: + typ = "string" + case basicInfo&types.IsInteger != 0: + typ = "integer" + default: + // NB(directxman12): floats are *NOT* allowed in kubernetes APIs + return "", "", fmt.Errorf("unsupported type %q", basic.String()) + } + + switch basic.Kind() { + case types.Int32, types.Uint32: + format = "int32" + case types.Int64, types.Uint64: + format = "int64" + } + + return typ, format, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go new file mode 100644 index 0000000000..ab4b1889c4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" +) + +// SchemaVisitor walks the nodes of a schema. +type SchemaVisitor interface { + // Visit is called for each schema node. If it returns a visitor, + // the visitor will be called on each direct child node, and then + // this visitor will be called again with `nil` to indicate that + // all children have been visited. If a nil visitor is returned, + // children are not visited. + Visit(schema *apiext.JSONSchemaProps) SchemaVisitor +} + +// EditSchema walks the given schema using the given visitor. 
Actual +// pointers to each schema node are passed to the visitor, so any changes +// made by the visitor will be reflected to the passed-in schema. +func EditSchema(schema *apiext.JSONSchemaProps, visitor SchemaVisitor) { + walker := schemaWalker{visitor: visitor} + walker.walkSchema(schema) +} + +// schemaWalker knows how to walk the schema, saving modifications +// made by the given visitor. +type schemaWalker struct { + visitor SchemaVisitor +} + +// walkSchema walks the given schema, saving modifications made by the +// visitor (this is as simple as passing a pointer in most cases, +// but special care needs to be taken to persist with maps). +func (w schemaWalker) walkSchema(schema *apiext.JSONSchemaProps) { + subVisitor := w.visitor.Visit(schema) + if subVisitor == nil { + return + } + defer subVisitor.Visit(nil) + + subWalker := schemaWalker{visitor: subVisitor} + if schema.Items != nil { + subWalker.walkPtr(schema.Items.Schema) + subWalker.walkSlice(schema.Items.JSONSchemas) + } + subWalker.walkSlice(schema.AllOf) + subWalker.walkSlice(schema.OneOf) + subWalker.walkSlice(schema.AnyOf) + subWalker.walkPtr(schema.Not) + subWalker.walkMap(schema.Properties) + if schema.AdditionalProperties != nil { + subWalker.walkPtr(schema.AdditionalProperties.Schema) + } + subWalker.walkMap(schema.PatternProperties) + for name, dep := range schema.Dependencies { + subWalker.walkPtr(dep.Schema) + schema.Dependencies[name] = dep + } + if schema.AdditionalItems != nil { + subWalker.walkPtr(schema.AdditionalItems.Schema) + } + subWalker.walkMap(schema.Definitions) +} + +// walkMap walks over values of the given map, saving changes to them. +func (w schemaWalker) walkMap(defs map[string]apiext.JSONSchemaProps) { + for name, def := range defs { + w.walkSchema(&def) + // make sure the edits actually go through since we can't + // take a reference to the value in the map + defs[name] = def + } +} + +// walkSlice walks over items of the given slice. +func (w schemaWalker) walkSlice(defs []apiext.JSONSchemaProps) { + for i := range defs { + w.walkSchema(&defs[i]) + } +} + +// walkPtr walks over the contents of the given pointer, if it's not nil. +func (w schemaWalker) walkPtr(def *apiext.JSONSchemaProps) { + if def == nil { + return + } + w.walkSchema(def) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go new file mode 100644 index 0000000000..f7a4389608 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go @@ -0,0 +1,242 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package crd + +import ( + "fmt" + "sort" + "strings" + + "github.com/gobuffalo/flect" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// SpecMarker is a marker that knows how to apply itself to a particular +// version in a CRD. +type SpecMarker interface { + // ApplyToCRD applies this marker to the given CRD, in the given version + // within that CRD. It's called after everything else in the CRD is populated. + ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error +} + +// mergeIdenticalSubresources checks to see if subresources are identical across +// all versions, and if so, merges them into a top-level version. +// +// This assumes you're not using trivial versions. +func mergeIdenticalSubresources(crd *apiext.CustomResourceDefinition) { + subres := crd.Spec.Versions[0].Subresources + for _, ver := range crd.Spec.Versions { + if ver.Subresources == nil || !equality.Semantic.DeepEqual(subres, ver.Subresources) { + // either all nil, or not identical + return + } + } + + // things are identical if we've gotten this far, so move the subresources up + // and discard the identical per-version ones + crd.Spec.Subresources = subres + for i := range crd.Spec.Versions { + crd.Spec.Versions[i].Subresources = nil + } +} + +// mergeIdenticalSchemata checks to see if schemata are identical across +// all versions, and if so, merges them into a top-level version. +// +// This assumes you're not using trivial versions. +func mergeIdenticalSchemata(crd *apiext.CustomResourceDefinition) { + schema := crd.Spec.Versions[0].Schema + for _, ver := range crd.Spec.Versions { + if ver.Schema == nil || !equality.Semantic.DeepEqual(schema, ver.Schema) { + // either all nil, or not identical + return + } + } + + // things are identical if we've gotten this far, so move the schemata up + // to a single schema and discard the identical per-version ones + crd.Spec.Validation = schema + for i := range crd.Spec.Versions { + crd.Spec.Versions[i].Schema = nil + } +} + +// mergeIdenticalPrinterColumns checks to see if schemata are identical across +// all versions, and if so, merges them into a top-level version. +// +// This assumes you're not using trivial versions. +func mergeIdenticalPrinterColumns(crd *apiext.CustomResourceDefinition) { + cols := crd.Spec.Versions[0].AdditionalPrinterColumns + for _, ver := range crd.Spec.Versions { + if len(ver.AdditionalPrinterColumns) == 0 || !equality.Semantic.DeepEqual(cols, ver.AdditionalPrinterColumns) { + // either all nil, or not identical + return + } + } + + // things are identical if we've gotten this far, so move the printer columns up + // and discard the identical per-version ones + crd.Spec.AdditionalPrinterColumns = cols + for i := range crd.Spec.Versions { + crd.Spec.Versions[i].AdditionalPrinterColumns = nil + } +} + +// MergeIdenticalVersionInfo makes sure that components of the Versions field that are identical +// across all versions get merged into the top-level fields in v1beta1. +// +// This is required by the Kubernetes API server validation. +// +// The reason is that a v1beta1 -> v1 -> v1beta1 conversion cycle would need to +// round-trip identically, v1 doesn't have top-level subresources, and without +// this restriction it would be ambiguous how a v1-with-identical-subresources +// converts into a v1beta1). 
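+// (Reviewer's note, illustrative only: e.g. if both v1alpha1 and v1alpha2 declare an
+// identical status subresource, it is hoisted to spec.subresources and cleared from
+// each entry of spec.versions; the same hoisting is applied to identical schemata and
+// additionalPrinterColumns.)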
+func MergeIdenticalVersionInfo(crd *apiext.CustomResourceDefinition) { + if len(crd.Spec.Versions) > 1 { + mergeIdenticalSubresources(crd) + mergeIdenticalSchemata(crd) + mergeIdenticalPrinterColumns(crd) + } +} + +// NeedCRDFor requests the full CRD for the given group-kind. It requires +// that the packages containing the Go structs for that CRD have already +// been loaded with NeedPackage. +func (p *Parser) NeedCRDFor(groupKind schema.GroupKind, maxDescLen *int) { + p.init() + + if _, exists := p.CustomResourceDefinitions[groupKind]; exists { + return + } + + var packages []*loader.Package + for pkg, gv := range p.GroupVersions { + if gv.Group != groupKind.Group { + continue + } + packages = append(packages, pkg) + } + + defaultPlural := flect.Pluralize(strings.ToLower(groupKind.Kind)) + crd := apiext.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apiext.SchemeGroupVersion.String(), + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: defaultPlural + "." + groupKind.Group, + }, + Spec: apiext.CustomResourceDefinitionSpec{ + Group: groupKind.Group, + Names: apiext.CustomResourceDefinitionNames{ + Kind: groupKind.Kind, + Plural: defaultPlural, + }, + }, + } + + for _, pkg := range packages { + typeIdent := TypeIdent{Package: pkg, Name: groupKind.Kind} + typeInfo := p.Types[typeIdent] + if typeInfo == nil { + continue + } + fullSchema := FlattenEmbedded(p.flattener.FlattenType(typeIdent), pkg) + if maxDescLen != nil { + TruncateDescription(fullSchema, *maxDescLen) + } + ver := apiext.CustomResourceDefinitionVersion{ + Name: p.GroupVersions[pkg].Version, + Served: true, + Schema: &apiext.CustomResourceValidation{ + OpenAPIV3Schema: fullSchema, + }, + } + crd.Spec.Versions = append(crd.Spec.Versions, ver) + } + + // markers are applied *after* initial generation of objects + for _, pkg := range packages { + typeIdent := TypeIdent{Package: pkg, Name: groupKind.Kind} + typeInfo := p.Types[typeIdent] + if typeInfo == nil { + continue + } + ver := p.GroupVersions[pkg].Version + + for _, markerVals := range typeInfo.Markers { + for _, val := range markerVals { + crdMarker, isCrdMarker := val.(SpecMarker) + if !isCrdMarker { + continue + } + if err := crdMarker.ApplyToCRD(&crd.Spec, ver); err != nil { + pkg.AddError(loader.ErrFromNode(err /* an okay guess */, typeInfo.RawSpec)) + } + } + } + } + + // fix the name if the plural was changed (this is the form the name *has* to take, so no harm in chaning it). + crd.Name = crd.Spec.Names.Plural + "." + groupKind.Group + + // nothing to actually write + if len(crd.Spec.Versions) == 0 { + return + } + + // it is necessary to make sure the order of CRD versions in crd.Spec.Versions is stable and explicitly set crd.Spec.Version. + // Otherwise, crd.Spec.Version may point to different CRD versions across different runs. 
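+	// (Reviewer's note, illustrative only: with versions named "v1alpha1" and "v1alpha2",
+	// the lexical sort below yields [v1alpha1 v1alpha2] and crd.Spec.Version is set to "v1alpha1".)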
+ sort.Slice(crd.Spec.Versions, func(i, j int) bool { return crd.Spec.Versions[i].Name < crd.Spec.Versions[j].Name }) + crd.Spec.Version = crd.Spec.Versions[0].Name + + // make sure we have *a* storage version + // (default it if we only have one, otherwise, bail) + if len(crd.Spec.Versions) == 1 { + crd.Spec.Versions[0].Storage = true + } + + hasStorage := false + for _, ver := range crd.Spec.Versions { + if ver.Storage { + hasStorage = true + break + } + } + if !hasStorage { + // just add the error to the first relevant package for this CRD, + // since there's no specific error location + packages[0].AddError(fmt.Errorf("CRD for %s has no storage version", groupKind)) + } + + // NB(directxman12): CRD's status doesn't have omitempty markers, which means things + // get serialized as null, which causes the validator to freak out. Manually set + // these to empty till we get a better solution. + crd.Status.Conditions = []apiext.CustomResourceDefinitionCondition{} + crd.Status.StoredVersions = []string{} + + // make sure we merge identical per-version parts, to avoid validation errors + // (see the reasoning near the top of the file). + MergeIdenticalVersionInfo(&crd) + + p.CustomResourceDefinitions[groupKind] = crd +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go deleted file mode 100644 index 821aab5d25..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bufio" - "fmt" - gobuild "go/build" - "log" - "os" - "path" - "path/filepath" - "strings" -) - -// IsGoSrcPath validate if given path is of path $GOPATH/src. -func IsGoSrcPath(filePath string) bool { - for _, gopath := range getGoPaths() { - goSrc := path.Join(gopath, "src") - if filePath == goSrc { - return true - } - } - - return false -} - -// IsUnderGoSrcPath validate if given path is under path $GOPATH/src. -func IsUnderGoSrcPath(filePath string) bool { - for _, gopath := range getGoPaths() { - goSrc := path.Join(gopath, "src") - if strings.HasPrefix(filepath.Dir(filePath), goSrc) { - return true - } - } - - return false -} - -// DirToGoPkg returns the Gopkg for the given directory if it exists -// under a GOPATH otherwise returns error. 
For example, -// /Users/x/go/src/github.com/y/z ==> github.com/y/z -func DirToGoPkg(dir string) (pkg string, err error) { - goPaths := getGoPaths() - for _, gopath := range goPaths { - goSrc := path.Join(gopath, "src") - if !strings.HasPrefix(dir, goSrc) { - continue - } - pkg, err := filepath.Rel(goSrc, dir) - if err == nil { - return pkg, err - } - } - - return "", fmt.Errorf("dir '%s' does not exist under any GOPATH %v", dir, goPaths) -} - -func getGoPaths() []string { - gopaths := os.Getenv("GOPATH") - if len(gopaths) == 0 { - gopaths = gobuild.Default.GOPATH - } - return filepath.SplitList(gopaths) -} - -// PathHasProjectFile validate if PROJECT file exists under the path. -func PathHasProjectFile(filePath string) bool { - if _, err := os.Stat(path.Join(filePath, "PROJECT")); os.IsNotExist(err) { - return false - } - - return true -} - -// GetDomainFromProject get domain information from the PROJECT file under the path. -func GetDomainFromProject(rootPath string) string { - var domain string - - file, err := os.Open(path.Join(rootPath, "PROJECT")) - if err != nil { - log.Fatal(err) - } - defer func() { - if err := file.Close(); err != nil { - log.Fatal(err) - } - }() - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "domain:") { - domainInfo := strings.Split(scanner.Text(), ":") - if len(domainInfo) != 2 { - log.Fatalf("Unexpected domain info: %s", scanner.Text()) - } - domain = strings.Replace(domainInfo[1], " ", "", -1) - break - } - } - - return domain -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go new file mode 100644 index 0000000000..fd42462471 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go @@ -0,0 +1,45 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package crd + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "generates CustomResourceDefinition objects.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "TrivialVersions": markers.DetailedHelp{ + Summary: "indicates that we should produce a single-version CRD. ", + Details: " Single \"trivial-version\" CRDs are compatible with older (pre 1.13) Kubernetes API servers. The storage version's schema will be used as the CRD's schema.", + }, + "MaxDescLen": markers.DetailedHelp{ + Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema. ", + Details: " 0 indicates drop the description for all fields completely. 
n indicates limit the description to at most n characters and truncate the description to closest sentence boundary if it exceeds n characters.", + }, + }, + } +} diff --git a/vendor/k8s.io/client-go/util/jsonpath/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go similarity index 58% rename from vendor/k8s.io/client-go/util/jsonpath/doc.go rename to vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go index 0effb15c41..f4200f2fc1 100644 --- a/vendor/k8s.io/client-go/util/jsonpath/doc.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package jsonpath is a template engine using jsonpath syntax, -// which can be seen at http://goessner.net/articles/JsonPath/. -// In addition, it has {range} {end} function to iterate list and slice. -package jsonpath // import "k8s.io/client-go/util/jsonpath" +// Package deepcopy generates DeepCopy, DeepCopyInto, and DeepCopyObject +// implementations for types. +// +// It's ported from k8s.io/code-generator's / k8s.io/gengo's deepcopy-gen, +// but it's scoped specifically to runtime.Object and skips support for +// deepcopying interfaces, which aren't handled in CRDs anyway. +package deepcopy diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go new file mode 100644 index 0000000000..a10c705892 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go @@ -0,0 +1,300 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deepcopy + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "io" + "sort" + "strings" + + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// NB(directxman12): markers.LoadRoots ignores autogenerated code via a build tag +// so any time we check for existing deepcopy functions, we only seen manually written ones. 
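+// (Reviewer's note, illustrative only and not part of upstream controller-tools: the
+// markers registered below are written as comments on API types, e.g.
+//	// +kubebuilder:object:root=true
+//	type OpenStackCluster struct { ... }
+// while the legacy deepcopy-gen forms look like
+//	// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// on a type, or
+//	// +k8s:deepcopy-gen=package
+// at the package level.)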
+ +const ( + runtimeObjPath = "k8s.io/apimachinery/pkg/runtime.Object" +) + +var ( + enablePkgMarker = markers.Must(markers.MakeDefinition("kubebuilder:object:generate", markers.DescribesPackage, false)) + enableTypeMarker = markers.Must(markers.MakeDefinition("kubebuilder:object:generate", markers.DescribesType, false)) + isObjectMarker = markers.Must(markers.MakeDefinition("kubebuilder:object:root", markers.DescribesType, false)) + + legacyEnablePkgMarker = markers.Must(markers.MakeDefinition("k8s:deepcopy-gen", markers.DescribesPackage, markers.RawArguments(nil))) + legacyEnableTypeMarker = markers.Must(markers.MakeDefinition("k8s:deepcopy-gen", markers.DescribesType, markers.RawArguments(nil))) + legacyIsObjectMarker = markers.Must(markers.MakeDefinition("k8s:deepcopy-gen:interfaces", markers.DescribesType, "")) +) + +// +controllertools:marker:generateHelp + +// Generator generates code containing DeepCopy, DeepCopyInto, and +// DeepCopyObject method implementations. +type Generator struct { + // HeaderFile specifies the header text (e.g. license) to prepend to generated files. + HeaderFile string `marker:",optional"` + // Year specifies the year to substitute for " YEAR" in the header file. + Year string `marker:",optional"` +} + +func (Generator) RegisterMarkers(into *markers.Registry) error { + if err := markers.RegisterAll(into, + enablePkgMarker, legacyEnablePkgMarker, enableTypeMarker, + legacyEnableTypeMarker, isObjectMarker, legacyIsObjectMarker); err != nil { + return err + } + into.AddHelp(enablePkgMarker, + markers.SimpleHelp("object", "enables or disables object interface & deepcopy implementation generation for this package")) + into.AddHelp( + enableTypeMarker, markers.SimpleHelp("object", "overrides enabling or disabling deepcopy generation for this type")) + into.AddHelp(isObjectMarker, + markers.SimpleHelp("object", "enables object interface implementation generation for this type")) + + into.AddHelp(legacyEnablePkgMarker, + markers.DeprecatedHelp(enablePkgMarker.Name, "object", "enables or disables object interface & deepcopy implementation generation for this package")) + into.AddHelp(legacyEnableTypeMarker, + markers.DeprecatedHelp(enableTypeMarker.Name, "object", "overrides enabling or disabling deepcopy generation for this type")) + into.AddHelp(legacyIsObjectMarker, + markers.DeprecatedHelp(isObjectMarker.Name, "object", "enables object interface implementation generation for this type")) + return nil +} + +func enabledOnPackage(col *markers.Collector, pkg *loader.Package) (bool, error) { + pkgMarkers, err := markers.PackageMarkers(col, pkg) + if err != nil { + return false, err + } + pkgMarker := pkgMarkers.Get(enablePkgMarker.Name) + if pkgMarker != nil { + return pkgMarker.(bool), nil + } + legacyMarker := pkgMarkers.Get(legacyEnablePkgMarker.Name) + if legacyMarker != nil { + legacyMarkerVal := string(legacyMarker.(markers.RawArguments)) + firstArg := strings.Split(legacyMarkerVal, ",")[0] + return firstArg == "package", nil + } + + return false, nil +} + +func enabledOnType(allTypes bool, info *markers.TypeInfo) bool { + if typeMarker := info.Markers.Get(enableTypeMarker.Name); typeMarker != nil { + return typeMarker.(bool) + } + legacyMarker := info.Markers.Get(legacyEnableTypeMarker.Name) + if legacyMarker != nil { + legacyMarkerVal := string(legacyMarker.(markers.RawArguments)) + return legacyMarkerVal == "true" + } + return allTypes || genObjectInterface(info) +} + +func genObjectInterface(info *markers.TypeInfo) bool { + objectEnabled := 
info.Markers.Get(isObjectMarker.Name) + if objectEnabled != nil { + return objectEnabled.(bool) + } + + for _, legacyEnabled := range info.Markers[legacyIsObjectMarker.Name] { + if legacyEnabled == runtimeObjPath { + return true + } + } + return false +} + +func (d Generator) Generate(ctx *genall.GenerationContext) error { + var headerText string + + if d.HeaderFile != "" { + headerBytes, err := ctx.ReadFile(d.HeaderFile) + if err != nil { + return err + } + headerText = string(headerBytes) + } + headerText = strings.ReplaceAll(headerText, " YEAR", " "+d.Year) + + objGenCtx := ObjectGenCtx{ + Collector: ctx.Collector, + Checker: ctx.Checker, + HeaderText: headerText, + } + + for _, root := range ctx.Roots { + outContents := objGenCtx.GenerateForPackage(root) + if outContents == nil { + continue + } + + writeOut(ctx, root, outContents) + } + + return nil +} + +// ObjectGenCtx contains the common info for generating deepcopy implementations. +// It mostly exists so that generating for a package can be easily tested without +// requiring a full set of output rules, etc. +type ObjectGenCtx struct { + Collector *markers.Collector + Checker *loader.TypeChecker + HeaderText string +} + +// writeHeader writes out the build tag, package declaration, and imports +func writeHeader(pkg *loader.Package, out io.Writer, packageName string, imports *importsList, headerText string) { + // NB(directxman12): blank line after build tags to distinguish them from comments + _, err := fmt.Fprintf(out, `// +build !ignore_autogenerated + +%[3]s + +// Code generated by controller-gen. DO NOT EDIT. + +package %[1]s + +import ( +%[2]s +) + +`, packageName, strings.Join(imports.ImportSpecs(), "\n"), headerText) + if err != nil { + pkg.AddError(err) + } + +} + +// GenerateForPackage generates DeepCopy and runtime.Object implementations for +// types in the given package, writing the formatted result to given writer. +// May return nil if source could not be generated. 
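+// (Reviewer's note, illustrative only: a typical caller does
+//	if out := objGenCtx.GenerateForPackage(rootPkg); out != nil { writeOut(ctx, rootPkg, out) }
+// which is exactly what Generator.Generate above does for each root package.)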
+func (ctx *ObjectGenCtx) GenerateForPackage(root *loader.Package) []byte { + allTypes, err := enabledOnPackage(ctx.Collector, root) + if err != nil { + root.AddError(err) + return nil + } + + ctx.Checker.Check(root, func(node ast.Node) bool { + // ignore interfaces + _, isIface := node.(*ast.InterfaceType) + return !isIface + }) + + root.NeedTypesInfo() + + byType := make(map[string][]byte) + imports := &importsList{ + byPath: make(map[string]string), + byAlias: make(map[string]string), + pkg: root, + } + // avoid confusing aliases by "reserving" the root package's name as an alias + imports.byAlias[root.Name] = "" + + if err := markers.EachType(ctx.Collector, root, func(info *markers.TypeInfo) { + outContent := new(bytes.Buffer) + + // copy when nabled for all types and not disabled, or enabled + // specifically on this type + if !enabledOnType(allTypes, info) { + return + } + + // avoid copying non-exported types, etc + if !shouldBeCopied(root, info) { + return + } + + copyCtx := ©MethodMaker{ + pkg: root, + importsList: imports, + codeWriter: &codeWriter{out: outContent}, + } + + copyCtx.GenerateMethodsFor(root, info) + + outBytes := outContent.Bytes() + if len(outBytes) > 0 { + byType[info.Name] = outBytes + } + }); err != nil { + root.AddError(err) + return nil + } + + if len(byType) == 0 { + return nil + } + + outContent := new(bytes.Buffer) + writeHeader(root, outContent, root.Name, imports, ctx.HeaderText) + writeMethods(root, outContent, byType) + + outBytes := outContent.Bytes() + formattedBytes, err := format.Source(outBytes) + if err != nil { + root.AddError(err) + // we still write the invalid source to disk to figure out what went wrong + } else { + outBytes = formattedBytes + } + + return outBytes +} + +// writeMethods writes each method to the file, sorted by type name. +func writeMethods(pkg *loader.Package, out io.Writer, byType map[string][]byte) { + sortedNames := make([]string, 0, len(byType)) + for name := range byType { + sortedNames = append(sortedNames, name) + } + sort.Strings(sortedNames) + + for _, name := range sortedNames { + _, err := out.Write(byType[name]) + if err != nil { + pkg.AddError(err) + } + } +} + +// writeFormatted outputs the given code, after gofmt-ing it. If we couldn't gofmt, +// we write the unformatted code for debugging purposes. +func writeOut(ctx *genall.GenerationContext, root *loader.Package, outBytes []byte) { + outputFile, err := ctx.Open(root, "zz_generated.deepcopy.go") + if err != nil { + root.AddError(err) + return + } + defer outputFile.Close() + n, err := outputFile.Write(outBytes) + if err != nil { + root.AddError(err) + return + } + if n < len(outBytes) { + root.AddError(io.ErrShortWrite) + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go new file mode 100644 index 0000000000..1c92e48a09 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go @@ -0,0 +1,812 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deepcopy
+
+import (
+	"fmt"
+	"go/ast"
+	"go/types"
+	"io"
+	"path"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// NB(directxman12): This code is a bit of a byzantine mess.
+// I've tried to clean it up a bit from the original in deepcopy-gen,
+// but parts remain a bit convoluted. Exercise caution when changing.
+// It's perhaps a tad over-commented now, but better safe than sorry.
+// It also seriously needs auditing for sanity -- there's parts where we
+// copy the original deepcopy-gen's output just to be safe, but some of that
+// could be simplified away if we're careful.
+
+// codeWriter assists in writing out Go code lines and blocks to a writer.
+type codeWriter struct {
+	out io.Writer
+}
+
+// Line writes a single line.
+func (c *codeWriter) Line(line string) {
+	fmt.Fprintln(c.out, line)
+}
+
+// Linef writes a single line with formatting (as per fmt.Sprintf).
+func (c *codeWriter) Linef(line string, args ...interface{}) {
+	fmt.Fprintf(c.out, line+"\n", args...)
+}
+
+// If writes an if statement with the given setup/condition clause, executing
+// the given function to write the contents of the block.
+func (c *codeWriter) If(setup string, block func()) {
+	c.Linef("if %s {", setup)
+	block()
+	c.Line("}")
+}
+
+// IfElse writes if and else statements with the given setup/condition clause, executing
+// the given functions to write the contents of the blocks.
+func (c *codeWriter) IfElse(setup string, ifBlock func(), elseBlock func()) {
+	c.Linef("if %s {", setup)
+	ifBlock()
+	c.Line("} else {")
+	elseBlock()
+	c.Line("}")
+}
+
+// For writes a for statement with the given setup/condition clause, executing
+// the given function to write the contents of the block.
+func (c *codeWriter) For(setup string, block func()) {
+	c.Linef("for %s {", setup)
+	block()
+	c.Line("}")
+}
+
+// importsList keeps track of required imports, automatically assigning aliases
+// to import statements.
+type importsList struct {
+	byPath  map[string]string
+	byAlias map[string]string
+
+	pkg *loader.Package
+}
+
+// NeedImport marks that the given package is needed in the list of imports,
+// returning the ident (import alias) that should be used to reference the package.
+func (l *importsList) NeedImport(importPath string) string {
+	// we get an actual path from Package, which might include vendored
+	// packages if running on a package in vendor.
+	if ind := strings.LastIndex(importPath, "/vendor/"); ind != -1 {
+		importPath = importPath[ind+8: /* len("/vendor/") */]
+	}
+
+	// check to see if we've already assigned an alias, and just return that.
+ alias, exists := l.byPath[importPath] + if exists { + return alias + } + + // otherwise, calculate an import alias by joining path parts till we get something unique + restPath, nextWord := path.Split(importPath) + + for otherPath, exists := "", true; exists && otherPath != importPath; otherPath, exists = l.byAlias[alias] { + if restPath == "" { + // do something else to disambiguate if we're run out of parts and + // still have duplicates, somehow + alias += "x" + } + + // can't have a first digit, per Go identifier rules, so just skip them + for firstRune, runeLen := utf8.DecodeRuneInString(nextWord); unicode.IsDigit(firstRune); firstRune, runeLen = utf8.DecodeRuneInString(nextWord) { + nextWord = nextWord[runeLen:] + } + + // make a valid identifier by replacing "bad" characters with underscores + nextWord = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' + }, nextWord) + + alias = nextWord + alias + if len(restPath) > 0 { + restPath, nextWord = path.Split(restPath[:len(restPath)-1] /* chop off final slash */) + } + } + + l.byPath[importPath] = alias + l.byAlias[alias] = importPath + return alias +} + +// ImportSpecs returns a string form of each import spec +// (i.e. `alias "path/to/import"). Aliases are only present +// when they don't match the package name. +func (l *importsList) ImportSpecs() []string { + res := make([]string, 0, len(l.byPath)) + for importPath, alias := range l.byPath { + pkg := l.pkg.Imports()[importPath] + if pkg != nil && pkg.Name == alias { + // don't print if alias is the same as package name + // (we've already taken care of duplicates). + res = append(res, fmt.Sprintf("%q", importPath)) + } else { + res = append(res, fmt.Sprintf("%s %q", alias, importPath)) + } + } + return res +} + +// namingInfo holds package and syntax for referencing a field, type, +// etc. It's used to allow lazily marking import usage. +// You should generally retrieve the syntax using Syntax. +type namingInfo struct { + // typeInfo is the type being named. + typeInfo types.Type + nameOverride string +} + +// Syntax calculates the code representation of the given type or name, +// and marks that is used (potentially marking an import as used). +func (n *namingInfo) Syntax(basePkg *loader.Package, imports *importsList) string { + if n.nameOverride != "" { + return n.nameOverride + } + + // NB(directxman12): typeInfo.String gets us most of the way there, + // but fails (for us) on named imports, since it uses the full package path. + switch typeInfo := n.typeInfo.(type) { + case *types.Named: + // register that we need an import for this type, + // so we can get the appropriate alias to use. + typeName := typeInfo.Obj() + otherPkg := typeName.Pkg() + if otherPkg == basePkg.Types { + // local import + return typeName.Name() + } + alias := imports.NeedImport(loader.NonVendorPath(otherPkg.Path())) + return alias + "." 
+ typeName.Name() + case *types.Basic: + return typeInfo.String() + case *types.Pointer: + return "*" + (&namingInfo{typeInfo: typeInfo.Elem()}).Syntax(basePkg, imports) + case *types.Slice: + return "[]" + (&namingInfo{typeInfo: typeInfo.Elem()}).Syntax(basePkg, imports) + case *types.Map: + return fmt.Sprintf( + "map[%s]%s", + (&namingInfo{typeInfo: typeInfo.Key()}).Syntax(basePkg, imports), + (&namingInfo{typeInfo: typeInfo.Elem()}).Syntax(basePkg, imports)) + default: + basePkg.AddError(fmt.Errorf("name requested for invalid type %s", typeInfo)) + return typeInfo.String() + } +} + +// copyMethodMakers makes DeepCopy (and related) methods for Go types, +// writing them to its codeWriter. +type copyMethodMaker struct { + pkg *loader.Package + *importsList + *codeWriter +} + +// GenerateMethodsFor makes DeepCopy, DeepCopyInto, and DeepCopyObject methods +// for the given type, when appropriate +func (c *copyMethodMaker) GenerateMethodsFor(root *loader.Package, info *markers.TypeInfo) { + typeInfo := root.TypesInfo.TypeOf(info.RawSpec.Name) + if typeInfo == types.Typ[types.Invalid] { + root.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", info.Name), info.RawSpec)) + } + + // figure out if we need to use a pointer receiver -- most types get a pointer receiver, + // except those that are aliases to types that are already pass-by-reference (pointers, + // interfaces. maps, slices). + ptrReceiver := usePtrReceiver(typeInfo) + + hasManualDeepCopyInto := hasDeepCopyIntoMethod(root, typeInfo) + hasManualDeepCopy, deepCopyOnPtr := hasDeepCopyMethod(root, typeInfo) + + // only generate each method if it hasn't been implemented. + if !hasManualDeepCopyInto { + c.Line("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.") + if ptrReceiver { + c.Linef("func (in *%s) DeepCopyInto(out *%s) {", info.Name, info.Name) + } else { + c.Linef("func (in %s) DeepCopyInto(out *%s) {", info.Name, info.Name) + c.Line("{in := &in") // add an extra block so that we can redefine `in` without type issues + } + + // just wrap the existing deepcopy if present + if hasManualDeepCopy { + if deepCopyOnPtr { + c.Line("clone := in.DeepCopy()") + c.Line("*out = *clone") + } else { + c.Line("*out = in.DeepCopy()") + } + } else { + c.genDeepCopyIntoBlock(&namingInfo{nameOverride: info.Name}, typeInfo) + } + + if !ptrReceiver { + c.Line("}") // close our extra "in redefinition" block + } + c.Line("}") + } + + if !hasManualDeepCopy { + // these are both straightforward, so we just template them out. + if ptrReceiver { + c.Linef(ptrDeepCopy, info.Name) + } else { + c.Linef(bareDeepCopy, info.Name) + } + + // maybe also generate DeepCopyObject, if asked. + if genObjectInterface(info) { + // we always need runtime.Object for DeepCopyObject + runtimeAlias := c.NeedImport("k8s.io/apimachinery/pkg/runtime") + if ptrReceiver { + c.Linef(ptrDeepCopyObj, info.Name, runtimeAlias) + } else { + c.Linef(bareDeepCopyObj, info.Name, runtimeAlias) + } + } + } +} + +// genDeepCopyBody generates a DeepCopyInto block for the given type. The +// block is *not* wrapped in curly braces. +func (c *copyMethodMaker) genDeepCopyIntoBlock(actualName *namingInfo, typeInfo types.Type) { + // we calculate *how* we should copy mostly based on the "eventual" type of + // a given type (i.e. 
the type that results from following all aliases) + last := eventualUnderlyingType(typeInfo) + + // we might hit a type that has a manual deepcopy method written on non-root types + // (this case is handled for root types in GenerateMethodFor). + // In that case (when we're not dealing with a pointer, since those need special handling + // to match 1-to-1 with k8s deepcopy-gen), just use that. + if _, isPtr := last.(*types.Pointer); !isPtr && hasAnyDeepCopyMethod(c.pkg, typeInfo) { + c.Line("*out = in.DeepCopy()") + return + } + + switch last := last.(type) { + case *types.Basic: + // basic types themselves can be "shallow" copied, so all we need + // to do is check if our *actual* type (not the underlying one) has + // a custom method implemented. + if hasMethod, _ := hasDeepCopyMethod(c.pkg, typeInfo); hasMethod { + c.Line("*out = in.DeepCopy()") + } + c.Line("*out = *in") + case *types.Map: + c.genMapDeepCopy(actualName, last) + case *types.Slice: + c.genSliceDeepCopy(actualName, last) + case *types.Struct: + c.genStructDeepCopy(actualName, last) + case *types.Pointer: + c.genPointerDeepCopy(actualName, last) + case *types.Named: + // handled via the above loop, should never happen + c.pkg.AddError(fmt.Errorf("interface type %s encountered directly, invalid condition", last)) + default: + c.pkg.AddError(fmt.Errorf("invalid type %s", last)) + } +} + +// genMapDeepCopy generates DeepCopy code for the given named type whose eventual +// type is the given map type. +func (c *copyMethodMaker) genMapDeepCopy(actualName *namingInfo, mapType *types.Map) { + // maps *must* have shallow-copiable types, since we just iterate + // through the keys, only trying to deepcopy the values. + if !fineToShallowCopy(mapType.Key()) { + c.pkg.AddError(fmt.Errorf("invalid map key type %s", mapType.Key())) + return + } + + // make our actual type (not the underlying one)... + c.Linef("*out = make(%[1]s, len(*in))", actualName.Syntax(c.pkg, c.importsList)) + + // ...and copy each element appropriately + c.For("key, val := range *in", func() { + // check if we have manually written methods, + // in which case we'll just try and use those + hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, mapType.Elem()) + hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, mapType.Elem()) + switch { + case hasDeepCopyInto || hasDeepCopy: + // use the manually-written methods + _, fieldIsPtr := mapType.Elem().(*types.Pointer) // is "out" actually a pointer + inIsPtr := resultWillBePointer(mapType.Elem(), hasDeepCopy, copyOnPtr) // does copying "in" produce a pointer + if hasDeepCopy { + // If we're calling DeepCopy, check if it's receiver needs a pointer + inIsPtr = copyOnPtr + } + if inIsPtr == fieldIsPtr { + c.Line("(*out)[key] = val.DeepCopy()") + } else if fieldIsPtr { + c.Line("{") // use a block because we use `x` as a temporary + c.Line("x := val.DeepCopy()") + c.Line("(*out)[key] = &x") + c.Line("}") + } else { + c.Line("(*out)[key] = *val.DeepCopy()") + } + case fineToShallowCopy(mapType.Elem()): + // just shallow copy types for which it's safe to do so + c.Line("(*out)[key] = val") + default: + // otherwise, we've got some kind-specific actions, + // based on the element's eventual type. 
+ + underlyingElem := eventualUnderlyingType(mapType.Elem()) + + // if it passes by reference, let the main switch handle it + if passesByReference(underlyingElem) { + c.Linef("var outVal %[1]s", (&namingInfo{typeInfo: underlyingElem}).Syntax(c.pkg, c.importsList)) + c.IfElse("val == nil", func() { + c.Line("(*out)[key] = nil") + }, func() { + c.Line("in, out := &val, &outVal") + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: mapType.Elem()}, mapType.Elem()) + }) + c.Line("(*out)[key] = outVal") + + return + } + + // otherwise... + switch underlyingElem := underlyingElem.(type) { + case *types.Struct: + // structs will have deepcopy generated for them, so use that + c.Line("(*out)[key] = *val.DeepCopy()") + default: + c.pkg.AddError(fmt.Errorf("invalid map value type %s", underlyingElem)) + return + } + } + }) +} + +// genSliceDeepCopy generates DeepCopy code for the given named type whose +// underlying type is the given slice. +func (c *copyMethodMaker) genSliceDeepCopy(actualName *namingInfo, sliceType *types.Slice) { + underlyingElem := eventualUnderlyingType(sliceType.Elem()) + + // make the actual type (not the underlying) + c.Linef("*out = make(%[1]s, len(*in))", actualName.Syntax(c.pkg, c.importsList)) + + // check if we need to do anything special, or just copy each element appropriately + switch { + case hasAnyDeepCopyMethod(c.pkg, sliceType.Elem()): + // just use deepcopy if it's present (deepcopyinto will be filled in by our code) + c.For("i := range *in", func() { + c.Line("(*in)[i].DeepCopyInto(&(*out)[i])") + }) + case fineToShallowCopy(underlyingElem): + // shallow copy if ok + c.Line("copy(*out, *in)") + default: + // copy each element appropriately + c.For("i := range *in", func() { + // fall back to normal code for reference types or those with custom logic + if passesByReference(underlyingElem) || hasAnyDeepCopyMethod(c.pkg, sliceType.Elem()) { + c.If("(*in)[i] != nil", func() { + c.Line("in, out := &(*in)[i], &(*out)[i]") + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: sliceType.Elem()}, sliceType.Elem()) + }) + return + } + + switch underlyingElem.(type) { + case *types.Struct: + // structs will always have deepcopy + c.Linef("(*in)[i].DeepCopyInto(&(*out)[i])") + default: + c.pkg.AddError(fmt.Errorf("invalid slice element type %s", underlyingElem)) + } + }) + } +} + +// genStructDeepCopy generates DeepCopy code for the given named type whose +// underlying type is the given struct. +func (c *copyMethodMaker) genStructDeepCopy(_ *namingInfo, structType *types.Struct) { + c.Line("*out = *in") + + for i := 0; i < structType.NumFields(); i++ { + field := structType.Field(i) + + // if we have a manual deepcopy, use that + hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, field.Type()) + hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, field.Type()) + if hasDeepCopyInto || hasDeepCopy { + // NB(directxman12): yes, I know this is kind-of weird that we + // have all this special-casing here, but it's nice for testing + // purposes to be 1-to-1 with deepcopy-gen, which does all sorts of + // stuff like this (I'm pretty sure I found some codepaths that + // never execute there, because they're pretty clearly invalid + // syntax). 
+ + _, fieldIsPtr := field.Type().(*types.Pointer) + inIsPtr := resultWillBePointer(field.Type(), hasDeepCopy, copyOnPtr) + if fieldIsPtr { + // we'll need a if block to check for nilness + // we'll let genDeepCopyIntoBlock handle the details, we just needed the setup + c.If(fmt.Sprintf("in.%s != nil", field.Name()), func() { + c.Linef("in, out := &in.%[1]s, &out.%[1]s", field.Name()) + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: field.Type()}, field.Type()) + }) + } else { + // special-case for compatibility with deepcopy-gen + if inIsPtr == fieldIsPtr { + c.Linef("out.%[1]s = in.%[1]s.DeepCopy()", field.Name()) + } else { + c.Linef("in.%[1]s.DeepCopyInto(&out.%[1]s)", field.Name()) + } + } + continue + } + + // pass-by-reference fields get delegated to the main type + underlyingField := eventualUnderlyingType(field.Type()) + if passesByReference(underlyingField) { + c.If(fmt.Sprintf("in.%s != nil", field.Name()), func() { + c.Linef("in, out := &in.%[1]s, &out.%[1]s", field.Name()) + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: field.Type()}, field.Type()) + }) + continue + } + + // otherwise... + switch underlyingField := underlyingField.(type) { + case *types.Basic: + // nothing to do, initial assignment copied this + case *types.Struct: + if fineToShallowCopy(field.Type()) { + c.Linef("out.%[1]s = in.%[1]s", field.Name()) + } else { + c.Linef("in.%[1]s.DeepCopyInto(&out.%[1]s)", field.Name()) + } + default: + c.pkg.AddError(fmt.Errorf("invalid field type %s", underlyingField)) + return + } + } +} + +// genPointerDeepCopy generates DeepCopy code for the given named type whose +// underlying type is the given struct. +func (c *copyMethodMaker) genPointerDeepCopy(_ *namingInfo, pointerType *types.Pointer) { + underlyingElem := eventualUnderlyingType(pointerType.Elem()) + + // if we have a manually written deepcopy, just use that + hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, pointerType.Elem()) + hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, pointerType.Elem()) + if hasDeepCopyInto || hasDeepCopy { + outNeedsPtr := resultWillBePointer(pointerType.Elem(), hasDeepCopy, copyOnPtr) + if hasDeepCopy { + outNeedsPtr = copyOnPtr + } + if outNeedsPtr { + c.Line("*out = (*in).DeepCopy()") + } else { + c.Line("x := (*in).DeepCopy()") + c.Line("*out = &x") + } + return + } + + // shallow-copiable types are pretty easy + if fineToShallowCopy(underlyingElem) { + c.Linef("*out = new(%[1]s)", (&namingInfo{typeInfo: pointerType.Elem()}).Syntax(c.pkg, c.importsList)) + c.Line("**out = **in") + return + } + + // pass-by-reference types get delegated to the main switch + if passesByReference(underlyingElem) { + c.Linef("*out = new(%s)", (&namingInfo{typeInfo: underlyingElem}).Syntax(c.pkg, c.importsList)) + c.If("**in != nil", func() { + c.Line("in, out := *in, *out") + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: underlyingElem}, eventualUnderlyingType(underlyingElem)) + }) + return + } + + // otherwise... + switch underlyingElem := underlyingElem.(type) { + case *types.Struct: + c.Linef("*out = new(%[1]s)", (&namingInfo{typeInfo: pointerType.Elem()}).Syntax(c.pkg, c.importsList)) + c.Line("(*in).DeepCopyInto(*out)") + default: + c.pkg.AddError(fmt.Errorf("invalid pointer element type %s", underlyingElem)) + return + } +} + +// usePtrReceiver checks if we need a pointer receiver on methods for the given type +// Pass-by-reference types don't get pointer receivers. 
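// Illustrative aside (not part of the vendored upstream file): to make the per-field
// handling above concrete, for a hypothetical type
//
//	type Widget struct {
//		Name  string
//		Count *int
//		Tags  []string
//	}
//
// the generator would emit a DeepCopyInto roughly like
//
//	func (in *Widget) DeepCopyInto(out *Widget) {
//		*out = *in
//		if in.Count != nil {
//			in, out := &in.Count, &out.Count
//			*out = new(int)
//			**out = **in
//		}
//		if in.Tags != nil {
//			in, out := &in.Tags, &out.Tags
//			*out = make([]string, len(*in))
//			copy(*out, *in)
//		}
//	}
//
// Basic fields ride along on the initial shallow copy; pointer and slice fields get the
// nil-check blocks produced by the pointer and slice cases above.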
+func usePtrReceiver(typeInfo types.Type) bool { + switch typeInfo.(type) { + case *types.Pointer: + return false + case *types.Map: + return false + case *types.Slice: + return false + case *types.Named: + return usePtrReceiver(typeInfo.Underlying()) + default: + return true + } +} + +func resultWillBePointer(typeInfo types.Type, hasDeepCopy, deepCopyOnPtr bool) bool { + // if we have a manual deepcopy, we can just check what that returns + if hasDeepCopy { + return deepCopyOnPtr + } + + // otherwise, we'll need to check its type + switch typeInfo := typeInfo.(type) { + case *types.Pointer: + // NB(directxman12): we don't have to worry about the elem having a deepcopy, + // since hasManualDeepCopy would've caught that. + + // we'll be calling on the elem, so check that + return resultWillBePointer(typeInfo.Elem(), false, false) + case *types.Map: + return false + case *types.Slice: + return false + case *types.Named: + return resultWillBePointer(typeInfo.Underlying(), false, false) + default: + return true + } +} + +// shouldBeCopied checks if we're supposed to make deepcopy methods the given type. +// +// This is the case if it's exported *and* either: +// - has a partial manual DeepCopy implementation (in which case we fill in the rest) +// - aliases to a non-basic type eventually +// - is a struct +func shouldBeCopied(pkg *loader.Package, info *markers.TypeInfo) bool { + if !ast.IsExported(info.Name) { + return false + } + + typeInfo := pkg.TypesInfo.TypeOf(info.RawSpec.Name) + if typeInfo == types.Typ[types.Invalid] { + pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", info.Name), info.RawSpec)) + return false + } + + // according to gengo, everything named is an alias, except for an alias to a pointer, + // which is just a pointer, afaict. Just roll with it. + if asPtr, isPtr := typeInfo.(*types.Named).Underlying().(*types.Pointer); isPtr { + typeInfo = asPtr + } + + lastType := typeInfo + if _, isNamed := typeInfo.(*types.Named); isNamed { + // if it has a manual deepcopy or deepcopyinto, we're fine + if hasAnyDeepCopyMethod(pkg, typeInfo) { + return true + } + + for underlyingType := typeInfo.Underlying(); underlyingType != lastType; lastType, underlyingType = underlyingType, underlyingType.Underlying() { + // if it has a manual deepcopy or deepcopyinto, we're fine + if hasAnyDeepCopyMethod(pkg, underlyingType) { + return true + } + + // aliases to other things besides basics need copy methods + // (basics can be straight-up shallow-copied) + if _, isBasic := underlyingType.(*types.Basic); !isBasic { + return true + } + } + } + + // structs are the only thing that's not a basic that's copiable by default + _, isStruct := lastType.(*types.Struct) + return isStruct +} + +// hasDeepCopyMethod checks if this type has a manual DeepCopy method and if +// the method has a pointer receiver. 
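// Illustrative aside (not part of the vendored upstream file): applied to a few
// hypothetical declarations, the shouldBeCopied rules above work out as follows.
//
//	type replicas int32         // unexported: skipped
//	type Replicas int32         // exported alias to a basic type: skipped (shallow copy suffices)
//	type Hosts []string         // exported alias to a non-basic type: gets deepcopy methods
//	type Spec struct{ N int32 } // exported struct: gets deepcopy methods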
+func hasDeepCopyMethod(pkg *loader.Package, typeInfo types.Type) (bool, bool) { + deepCopyMethod, ind, _ := types.LookupFieldOrMethod(typeInfo, true /* check pointers too */, pkg.Types, "DeepCopy") + if len(ind) != 1 { + // ignore embedded methods + return false, false + } + if deepCopyMethod == nil { + return false, false + } + + methodSig := deepCopyMethod.Type().(*types.Signature) + if methodSig.Params() != nil && methodSig.Params().Len() != 0 { + return false, false + } + if methodSig.Results() == nil || methodSig.Results().Len() != 1 { + return false, false + } + + recvAsPtr, recvIsPtr := methodSig.Recv().Type().(*types.Pointer) + if recvIsPtr { + // NB(directxman12): the pointer type returned here isn't comparable even though they + // have the same underlying type, for some reason (probably that + // LookupFieldOrMethod calls types.NewPointer for us), so check the + // underlying values. + + resultPtr, resultIsPtr := methodSig.Results().At(0).Type().(*types.Pointer) + if !resultIsPtr { + // pointer vs non-pointer are different types + return false, false + } + + if recvAsPtr.Elem() != resultPtr.Elem() { + return false, false + } + } else if methodSig.Results().At(0).Type() != methodSig.Recv().Type() { + return false, false + } + + return true, recvIsPtr +} + +// hasDeepCopyIntoMethod checks if this type has a manual DeepCopyInto method. +func hasDeepCopyIntoMethod(pkg *loader.Package, typeInfo types.Type) bool { + deepCopyMethod, ind, _ := types.LookupFieldOrMethod(typeInfo, true /* check pointers too */, pkg.Types, "DeepCopyInto") + if len(ind) != 1 { + // ignore embedded methods + return false + } + if deepCopyMethod == nil { + return false + } + + methodSig := deepCopyMethod.Type().(*types.Signature) + if methodSig.Params() == nil || methodSig.Params().Len() != 1 { + return false + } + paramPtr, isPtr := methodSig.Params().At(0).Type().(*types.Pointer) + if !isPtr { + return false + } + if methodSig.Results() != nil && methodSig.Results().Len() != 0 { + return false + } + + if recvPtr, recvIsPtr := methodSig.Recv().Type().(*types.Pointer); recvIsPtr { + // NB(directxman12): the pointer type returned here isn't comparable even though they + // have the same underlying type, for some reason (probably that + // LookupFieldOrMethod calls types.NewPointer for us), so check the + // underlying values. + return paramPtr.Elem() == recvPtr.Elem() + } + return methodSig.Recv().Type() == paramPtr.Elem() +} + +// hasAnyDeepCopyMethod checks if the given method has DeepCopy or DeepCopyInto +// (either of which implies the other will exist eventually). +func hasAnyDeepCopyMethod(pkg *loader.Package, typeInfo types.Type) bool { + hasDeepCopy, _ := hasDeepCopyMethod(pkg, typeInfo) + return hasDeepCopy || hasDeepCopyIntoMethod(pkg, typeInfo) +} + +// eventualUnderlyingType gets the "final" type in a sequence of named aliases. +// It's effectively a shortcut for calling Underlying in a loop. +func eventualUnderlyingType(typeInfo types.Type) types.Type { + last := typeInfo + for underlying := typeInfo.Underlying(); underlying != last; last, underlying = underlying, underlying.Underlying() { + // get the actual underlying type + } + return last +} + +// fineToShallowCopy checks if a shallow-copying a type is equivalent to deepcopy-ing it. 
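// Illustrative aside (not part of the vendored upstream file): for a hypothetical type
// Endpoint, the signature checks in hasDeepCopyMethod and hasDeepCopyIntoMethod above
// accept manually written methods shaped like
//
//	func (in *Endpoint) DeepCopy() *Endpoint        // or: func (in Endpoint) DeepCopy() Endpoint
//	func (in *Endpoint) DeepCopyInto(out *Endpoint)
//
// that is, DeepCopy takes no arguments and returns exactly one value of the receiver's
// type, and DeepCopyInto takes exactly one pointer to the receiver's type and returns
// nothing; anything else is treated as "no manual method".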
+func fineToShallowCopy(typeInfo types.Type) bool { + switch typeInfo := typeInfo.(type) { + case *types.Basic: + // basic types (int, string, etc) are always fine to shallow-copy + return true + case *types.Named: + // aliases are fine to shallow-copy as long as they resolve to a shallow-copyable type + return fineToShallowCopy(typeInfo.Underlying()) + case *types.Struct: + // structs are fine to shallow-copy if they have all shallow-copyable fields + for i := 0; i < typeInfo.NumFields(); i++ { + field := typeInfo.Field(i) + if !fineToShallowCopy(field.Type()) { + return false + } + } + return true + default: + return false + } +} + +// passesByReference checks if the given type passesByReference +// (except for interfaces, which are handled separately). +func passesByReference(typeInfo types.Type) bool { + switch typeInfo.(type) { + case *types.Slice: + return true + case *types.Map: + return true + case *types.Pointer: + return true + default: + return false + } +} + +var ( + // ptrDeepCopy is a DeepCopy for a type with an existing DeepCopyInto and a pointer receiver. + ptrDeepCopy = ` +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new %[1]s. +func (in *%[1]s) DeepCopy() *%[1]s { + if in == nil { return nil } + out := new(%[1]s) + in.DeepCopyInto(out) + return out +} +` + + // ptrDeepCopy is a DeepCopy for a type with an existing DeepCopyInto and a non-pointer receiver. + bareDeepCopy = ` +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new %[1]s. +func (in %[1]s) DeepCopy() %[1]s { + if in == nil { return nil } + out := new(%[1]s) + in.DeepCopyInto(out) + return *out +} +` + + // ptrDeepCopy is a DeepCopyObject for a type with an existing DeepCopyInto and a pointer receiver. + ptrDeepCopyObj = ` +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *%[1]s) DeepCopyObject() %[2]s.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} +` + // ptrDeepCopy is a DeepCopyObject for a type with an existing DeepCopyInto and a non-pointer receiver. + bareDeepCopyObj = ` +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in %[1]s) DeepCopyObject() %[2]s.Object { + return in.DeepCopy() +} +` +) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go new file mode 100644 index 0000000000..a04355836f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go @@ -0,0 +1,45 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. 
+ +package deepcopy + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "generates code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "HeaderFile": markers.DetailedHelp{ + Summary: "specifies the header text (e.g. license) to prepend to generated files.", + Details: "", + }, + "Year": markers.DetailedHelp{ + Summary: "specifies the year to substitute for \" YEAR\" in the header file.", + Details: "", + }, + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go new file mode 100644 index 0000000000..df8dd71389 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package genall defines entrypoints for generation tools to hook into and +// share the same set of parsing, typechecking, and marker information. +// +// Generators +// +// Each Generator knows how to register its markers into a central Registry, +// and then how to generate output using a Collector and some root packages. +// Each generator can be considered to be the output type of a marker, for easy +// command line parsing. +// +// Output and Input +// +// Generators output artifacts via an OutputRule. OutputRules know how to +// write output for different package-associated (code) files, as well as +// config files. Each OutputRule should also be considered to be the output +// type as a marker, for easy command-line parsing. +// +// OutputRules groups together an OutputRule per generator, plus a default +// output rule for any not explicitly specified. +// +// OutputRules are defined for stdout, file writing, and sending to /dev/null +// (useful for doing "type-checking" without actually saving the results). +// +// InputRule defines custom input loading, but its shared across all +// Generators. There's currently only a filesystem implementation. +// +// Runtime and Context +// +// Runtime maps together Generators, and constructs "contexts" which provide +// the common collector and roots, plus the output rule for that generator, and +// a handle for reading files (like boilerplate headers). +// +// It will run all associated generators, printing errors and automatically +// skipping type-checking errors (since those are commonly caused by the +// partial type-checking of loader.TypeChecker). +// +// Options +// +// The FromOptions (and associated helpers) function makes it easy to use generators +// and output rules as markers that can be parsed from the command line, producing +// a registry from command line args. 
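// Example (illustrative sketch, not part of the vendored upstream file): assuming some
// value gen that implements Generator, a caller could wire the pieces described above
// together roughly like
//
//	rt, err := genall.Generators{gen}.ForRoots("./api/...")
//	if err != nil {
//		// handle the error
//	}
//	rt.OutputRules = genall.OutputRules{Default: genall.OutputToDirectory("config")}
//	hadErrors := rt.Run()
//
// Only ForRoots, Run, OutputRules, and OutputToDirectory are taken from this package
// (they are defined later in this diff); the generator value and the root path are
// hypothetical.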
+package genall diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go new file mode 100644 index 0000000000..086b4df13e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genall + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "golang.org/x/tools/go/packages" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// Generators are a list of Generators. +type Generators []Generator + +// RegisterMarkers registers all markers defined by each of the Generators in +// this list into the given registry. +func (g Generators) RegisterMarkers(reg *markers.Registry) error { + for _, gen := range g { + if err := gen.RegisterMarkers(reg); err != nil { + return err + } + } + return nil +} + +// Generator knows how to register some set of markers, and then produce +// output artifacts based on loaded code containing those markers, +// sharing common loaded data. +type Generator interface { + // RegisterMarkers registers all markers needed by this Generator + // into the given registry. + RegisterMarkers(into *markers.Registry) error + // Generate generates artifacts produced by this marker. + // It's called *after* RegisterMarkers has been called. + Generate(*GenerationContext) error +} + +// HasHelp is some Generator, OutputRule, etc with a help method. +type HasHelp interface { + // Help returns help for this generator. + Help() *markers.DefinitionHelp +} + +// Runtime collects generators, loaded program data (Collector, root Packages), +// and I/O rules, running them together. +type Runtime struct { + // Generators are the Generators to be run by this Runtime. + Generators Generators + // GenerationContext is the base generation context that's copied + // to produce the context for each Generator. + GenerationContext + // OutputRules defines how to output artifacts for each Generator. + OutputRules OutputRules +} + +// GenerationContext defines the common information needed for each Generator +// to run. +type GenerationContext struct { + // Collector is the shared marker collector. + Collector *markers.Collector + // Roots are the base packages to be processed. + Roots []*loader.Package + // Checker is the shared partial type-checker. + Checker *loader.TypeChecker + // OutputRule describes how to output artifacts. + OutputRule + // InputRule describes how to load associated boilerplate artifacts. + // It should *not* be used to load source files. + InputRule +} + +// WriteYAML writes the given objects out, serialized as YAML, using the +// context's OutputRule. 
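// Illustrative aside (not part of the vendored upstream file): a minimal Generator
// built against the interfaces above might look roughly like
//
//	type packageLister struct{}
//
//	func (packageLister) RegisterMarkers(into *markers.Registry) error { return nil }
//
//	func (packageLister) Generate(ctx *GenerationContext) error {
//		paths := make([]string, 0, len(ctx.Roots))
//		for _, root := range ctx.Roots {
//			paths = append(paths, root.PkgPath)
//		}
//		return ctx.WriteYAML("packages.yaml", paths)
//	}
//
// packageLister is hypothetical, and root.PkgPath assumes loader.Package exposes the
// usual go/packages fields; RegisterMarkers, Generate, Roots, and WriteYAML are the
// pieces defined in this file.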
+func (g GenerationContext) WriteYAML(itemPath string, objs ...interface{}) error { + out, err := g.Open(nil, itemPath) + if err != nil { + return err + } + defer out.Close() + + for _, obj := range objs { + yamlContent, err := yaml.Marshal(obj) + if err != nil { + return err + } + n, err := out.Write(append([]byte("\n---\n"), yamlContent...)) + if err != nil { + return err + } + if n < len(yamlContent) { + return io.ErrShortWrite + } + } + + return nil +} + +// ReadFile reads the given boilerplate artifact using the context's InputRule. +func (g GenerationContext) ReadFile(path string) ([]byte, error) { + file, err := g.OpenForRead(path) + if err != nil { + return nil, err + } + defer file.Close() + return ioutil.ReadAll(file) +} + +// ForRoots produces a Runtime to run the given generators against the +// given packages. It outputs to /dev/null by default. +func (g Generators) ForRoots(rootPaths ...string) (*Runtime, error) { + roots, err := loader.LoadRoots(rootPaths...) + if err != nil { + return nil, err + } + rt := &Runtime{ + Generators: g, + GenerationContext: GenerationContext{ + Collector: &markers.Collector{ + Registry: &markers.Registry{}, + }, + Roots: roots, + InputRule: InputFromFileSystem, + Checker: &loader.TypeChecker{}, + }, + OutputRules: OutputRules{Default: OutputToNothing}, + } + if err := rt.Generators.RegisterMarkers(rt.Collector.Registry); err != nil { + return nil, err + } + return rt, nil +} + +// Run runs the Generators in this Runtime against its packages, printing +// errors (except type errors, which common result from using TypeChecker with +// filters), returning true if errors were found. +func (r *Runtime) Run() bool { + // TODO(directxman12): we could make this parallel, + // but we'd need to ensure all underlying machinery is threadsafe + if len(r.Generators) == 0 { + fmt.Fprintln(os.Stderr, "no generators to run") + return true + } + + for _, gen := range r.Generators { + ctx := r.GenerationContext // make a shallow copy + ctx.OutputRule = r.OutputRules.ForGenerator(gen) + if err := gen.Generate(&ctx); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } + + // skip TypeErrors -- they're probably just from partial typechecking in crd-gen + return loader.PrintErrors(r.Roots, packages.TypeError) +} diff --git a/pkg/apis/openstackproviderconfig/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go similarity index 56% rename from pkg/apis/openstackproviderconfig/v1alpha1/doc.go rename to vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go index 7a8ffa99e4..d84d1798b4 100644 --- a/pkg/apis/openstackproviderconfig/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/cluster-api/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta -// +groupName=openstackproviderconfig.k8s.io -package v1alpha1 +// Package help contains utilities for actually writing out marker help. 
+// +// Namely, it contains a series of structs (and helpers for producing them) +// that represent a merged view of marker definition and help that can be used +// for consumption by the pretty subpackage (for terminal help) or serialized +// as JSON (e.g. for generating HTML help). +package help diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go new file mode 100644 index 0000000000..06622db744 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go @@ -0,0 +1,30 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package pretty contains utilities for formatting terminal help output, +// and a use of those to display marker help. +// +// Terminal Output +// +// The Span interface and Table struct allow you to construct tables with +// colored formatting, without causing ANSI formatting characters to mess up width +// calculations. +// +// Marker Help +// +// The MarkersSummary prints a summary table for marker help, while the MarkersDetails +// prints out more detailed information, with explainations of the individual marker fields. +package pretty diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go new file mode 100644 index 0000000000..3e34cc0d6c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go @@ -0,0 +1,171 @@ +package pretty + +import ( + "fmt" + "io" + + "sigs.k8s.io/controller-tools/pkg/genall/help" + + "github.com/fatih/color" +) + +var ( + headingStyle = Decoration(*color.New(color.Bold, color.Underline)) + markerNameStyle = Decoration(*color.New(color.Bold)) + fieldSummaryStyle = Decoration(*color.New(color.FgGreen, color.Italic)) + markerTargetStyle = Decoration(*color.New(color.Faint)) + fieldDetailStyle = Decoration(*color.New(color.Italic, color.FgGreen)) + deprecatedStyle = Decoration(*color.New(color.CrossedOut)) +) + +// MarkersSummary returns a condensed summary of help for the given markers. 
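// Illustrative aside (not part of the vendored upstream file): the Span and Table
// primitives used below compose roughly like
//
//	table := &Table{Sizing: &TableCalculator{Padding: 2}}
//	table.StartRow()
//	table.Column(Text("+example:marker"))
//	table.Column(Text("a hypothetical summary"))
//	table.EndRow()
//	_ = table.WriteTo(os.Stdout)
//
// StartRow/Column/EndRow record cell widths with the TableCalculator, and WriteTo pads
// each column to the computed width (Table and TableCalculator appear later in this diff).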
+func MarkersSummary(groupName string, markers []help.MarkerDoc) Span { + out := new(SpanWriter) + + out.Print(Text("\n")) + out.Print(headingStyle.Containing(Text(groupName))) + out.Print(Text("\n\n")) + + table := &Table{Sizing: &TableCalculator{Padding: 2}} + for _, marker := range markers { + table.StartRow() + table.Column(MarkerSyntaxHelp(marker)) + table.Column(markerTargetStyle.Containing(Text(marker.Target))) + + summary := new(SpanWriter) + if marker.DeprecatedInFavorOf != nil && len(*marker.DeprecatedInFavorOf) > 0 { + summary.Print(markerNameStyle.Containing(Text("(use "))) + summary.Print(markerNameStyle.Containing(Text(*marker.DeprecatedInFavorOf))) + summary.Print(markerNameStyle.Containing(Text(") "))) + } + summary.Print(Text(marker.Summary)) + table.Column(summary) + + table.EndRow() + } + out.Print(table) + + out.Print(Text("\n")) + + return out +} + +// MarkersDetails returns detailed help for the given markers, including detailed field help. +func MarkersDetails(fullDetail bool, groupName string, markers []help.MarkerDoc) Span { + out := new(SpanWriter) + + out.Print(Line(headingStyle.Containing(Text(groupName)))) + out.Print(Newlines(2)) + + for _, marker := range markers { + out.Print(Line(markerName(marker))) + out.Print(Text(" ")) + out.Print(markerTargetStyle.Containing(Text(marker.Target))) + + summary := new(SpanWriter) + if marker.DeprecatedInFavorOf != nil && len(*marker.DeprecatedInFavorOf) > 0 { + summary.Print(markerNameStyle.Containing(Text("(use "))) + summary.Print(markerNameStyle.Containing(Text(*marker.DeprecatedInFavorOf))) + summary.Print(markerNameStyle.Containing(Text(") "))) + } + summary.Print(Text(marker.Summary)) + + if !marker.AnonymousField() { + out.Print(Indented(1, Line(summary))) + if len(marker.Details) > 0 && fullDetail { + out.Print(Indented(1, Line(Text(marker.Details)))) + } + } + + if marker.AnonymousField() { + out.Print(Indented(1, Line(fieldDetailStyle.Containing(FieldSyntaxHelp(marker.Fields[0]))))) + out.Print(Text(" ")) + out.Print(summary) + if len(marker.Details) > 0 && fullDetail { + out.Print(Indented(2, Line(Text(marker.Details)))) + } + out.Print(Newlines(1)) + } else if !marker.Empty() { + out.Print(Newlines(1)) + if fullDetail { + for _, arg := range marker.Fields { + out.Print(Indented(1, Line(fieldDetailStyle.Containing(FieldSyntaxHelp(arg))))) + out.Print(Indented(2, Line(Text(arg.Summary)))) + if len(arg.Details) > 0 && fullDetail { + out.Print(Indented(2, Line(Text(arg.Details)))) + out.Print(Newlines(1)) + } + } + out.Print(Newlines(1)) + } else { + table := &Table{Sizing: &TableCalculator{Padding: 2}} + for _, arg := range marker.Fields { + table.StartRow() + table.Column(fieldDetailStyle.Containing(FieldSyntaxHelp(arg))) + table.Column(Text(arg.Summary)) + table.EndRow() + } + + out.Print(Indented(1, table)) + } + } else { + out.Print(Newlines(1)) + } + } + + return out +} + +func FieldSyntaxHelp(arg help.FieldHelp) Span { + return fieldSyntaxHelp(arg, "") +} + +// fieldSyntaxHelp prints the syntax help for a particular marker argument. +func fieldSyntaxHelp(arg help.FieldHelp, sep string) Span { + if arg.Optional { + return FromWriter(func(out io.Writer) error { + _, err := fmt.Fprintf(out, "[%s%s=<%s>]", sep, arg.Name, arg.TypeString()) + return err + }) + } + return FromWriter(func(out io.Writer) error { + _, err := fmt.Fprintf(out, "%s%s=<%s>", sep, arg.Name, arg.TypeString()) + return err + }) +} + +// markerName returns a span containing just the appropriately-formatted marker name. 
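// Illustrative aside (not part of the vendored upstream file): for a hypothetical
// marker "example:pager" with a required int field "limit" and an optional string
// field "title", MarkerSyntaxHelp below would render roughly
//
//	+example:pager:limit=<int>[,title=<string>]
//
// since the first field is joined with ":" and subsequent ones with ",", and optional
// fields are wrapped in square brackets by fieldSyntaxHelp above.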
+func markerName(def help.MarkerDoc) Span { + if def.DeprecatedInFavorOf != nil { + return deprecatedStyle.Containing(Text("+" + def.Name)) + } + return markerNameStyle.Containing(Text("+" + def.Name)) +} + +// MarkerSyntaxHelp assembles syntax help for a given marker. +func MarkerSyntaxHelp(def help.MarkerDoc) Span { + out := new(SpanWriter) + + out.Print(markerName(def)) + + if def.Empty() { + return out + } + + sep := ":" + if def.AnonymousField() { + sep = "" + } + + fieldStyle := fieldSummaryStyle + if def.DeprecatedInFavorOf != nil { + fieldStyle = deprecatedStyle + } + + for _, arg := range def.Fields { + out.Print(fieldStyle.Containing(fieldSyntaxHelp(arg, sep))) + sep = "," + } + + return out +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go new file mode 100644 index 0000000000..8d7452a0b2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go @@ -0,0 +1,304 @@ +package pretty + +import ( + "bytes" + "fmt" + "io" + + "github.com/fatih/color" +) + +// NB(directxman12): this isn't particularly elegant, but it's also +// sufficiently simple as to be maintained here. Man (roff) would've +// probably worked, but it's not necessarily on Windows by default. + +// Span is a chunk of content that is writable to an output, but knows how to +// calculate its apparent visual "width" on the terminal (not to be confused +// with the raw length, which may include zero-width coloring sequences). +type Span interface { + // VisualLength reports the "width" as perceived by the user on the terminal + // (i.e. widest line, ignoring ANSI escape characters). + VisualLength() int + // WriteTo writes the full span contents to the given writer. + WriteTo(io.Writer) error +} + +// Table is a Span that writes its data in table form, with sizing controlled +// by the given table calculator. Rows are started with StartRow, followed by +// some calls to Column, followed by a call to EndRow. Once all rows are +// added, the table can be used as a Span. +type Table struct { + Sizing *TableCalculator + + cellsByRow [][]Span + colSizes []int +} + +// StartRow starts a new row. +// It must eventually be followed by EndRow. +func (t *Table) StartRow() { + t.cellsByRow = append(t.cellsByRow, []Span(nil)) +} + +// EndRow ends the currently started row. +func (t *Table) EndRow() { + lastRow := t.cellsByRow[len(t.cellsByRow)-1] + sizes := make([]int, len(lastRow)) + for i, cell := range lastRow { + sizes[i] = cell.VisualLength() + } + t.Sizing.AddRowSizes(sizes...) +} + +// Column adds the given span as a new column to the current row. +func (t *Table) Column(contents Span) { + currentRowInd := len(t.cellsByRow) - 1 + t.cellsByRow[currentRowInd] = append(t.cellsByRow[currentRowInd], contents) +} + +// SkipRow prints a span without having it contribute to the table calculation. 
+func (t *Table) SkipRow(contents Span) { + t.cellsByRow = append(t.cellsByRow, []Span{contents}) +} + +func (t *Table) WriteTo(out io.Writer) error { + if t.colSizes == nil { + t.colSizes = t.Sizing.ColumnWidths() + } + + for _, cells := range t.cellsByRow { + currentPosition := 0 + for colInd, cell := range cells { + colSize := t.colSizes[colInd] + diff := colSize - cell.VisualLength() + + if err := cell.WriteTo(out); err != nil { + return err + } + + if diff > 0 { + if err := writePadding(out, columnPadding, diff); err != nil { + return err + } + } + currentPosition += colSize + } + + if _, err := fmt.Fprint(out, "\n"); err != nil { + return err + } + } + + return nil +} + +func (t *Table) VisualLength() int { + if t.colSizes == nil { + t.colSizes = t.Sizing.ColumnWidths() + } + + res := 0 + for _, colSize := range t.colSizes { + res += colSize + } + return res +} + +// Text is a span that simply contains raw text. It's a good starting point. +type Text string + +func (t Text) VisualLength() int { return len(t) } +func (t Text) WriteTo(w io.Writer) error { + _, err := w.Write([]byte(t)) + return err +} + +// indented is a span that indents all lines by the given number of tabs. +type indented struct { + Amount int + Content Span +} + +func (i *indented) VisualLength() int { return i.Content.VisualLength() } +func (i *indented) WriteTo(w io.Writer) error { + var out bytes.Buffer + if err := i.Content.WriteTo(&out); err != nil { + return err + } + + lines := bytes.Split(out.Bytes(), []byte("\n")) + for lineInd, line := range lines { + if lineInd != 0 { + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + } + if len(line) == 0 { + continue + } + + if err := writePadding(w, indentPadding, i.Amount); err != nil { + return err + } + if _, err := w.Write(line); err != nil { + return err + } + } + return nil +} + +// Indented returns a span that indents all lines by the given number of tabs. +func Indented(amt int, content Span) Span { + return &indented{Amount: amt, Content: content} +} + +// fromWriter is a span that takes content from a function expecting a Writer. +type fromWriter struct { + cache []byte + cacheError error + run func(io.Writer) error +} + +func (f *fromWriter) VisualLength() int { + if f.cache == nil { + var buf bytes.Buffer + if err := f.run(&buf); err != nil { + f.cacheError = err + } + f.cache = buf.Bytes() + } + return len(f.cache) +} +func (f *fromWriter) WriteTo(w io.Writer) error { + if f.cache != nil { + if f.cacheError != nil { + return f.cacheError + } + _, err := w.Write(f.cache) + return err + } + return f.run(w) +} + +// FromWriter returns a span that takes content from a function expecting a Writer. +func FromWriter(run func(io.Writer) error) Span { + return &fromWriter{run: run} +} + +// Decoration represents a terminal decoration. +type Decoration color.Color + +// Containing returns a Span that has the given decoration applied. +func (d Decoration) Containing(contents Span) Span { + return &decorated{ + Contents: contents, + Attributes: color.Color(d), + } +} + +// decorated is a span that has some terminal decoration applied. 
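// Illustrative aside (not part of the vendored upstream file): the decoration and
// layout spans above compose like
//
//	bold := Decoration(*color.New(color.Bold))
//	span := Indented(1, Line(bold.Containing(Text("deprecated"))))
//	_ = span.WriteTo(os.Stdout)
//
// which writes a newline, one tab of indentation, and then the bolded text. Indented
// is defined above; Line and Newlines appear a little further down in this file.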
+type decorated struct { + Contents Span + Attributes color.Color +} + +func (d *decorated) VisualLength() int { return d.Contents.VisualLength() } +func (d *decorated) WriteTo(w io.Writer) error { + oldOut := color.Output + color.Output = w + defer func() { color.Output = oldOut }() + + d.Attributes.Set() + defer color.Unset() + + return d.Contents.WriteTo(w) +} + +// SpanWriter is a span that contains multiple sub-spans. +type SpanWriter struct { + contents []Span +} + +func (m *SpanWriter) VisualLength() int { + res := 0 + for _, span := range m.contents { + res += span.VisualLength() + } + return res +} +func (m *SpanWriter) WriteTo(w io.Writer) error { + for _, span := range m.contents { + if err := span.WriteTo(w); err != nil { + return err + } + } + return nil +} + +// Print adds a new span to this SpanWriter. +func (m *SpanWriter) Print(s Span) { + m.contents = append(m.contents, s) +} + +// lines is a span that adds some newlines, optionally followed by some content. +type lines struct { + content Span + amountBefore int +} + +func (l *lines) VisualLength() int { + if l.content == nil { + return 0 + } + return l.content.VisualLength() +} +func (l *lines) WriteTo(w io.Writer) error { + if err := writePadding(w, linesPadding, l.amountBefore); err != nil { + return err + } + if l.content != nil { + if err := l.content.WriteTo(w); err != nil { + return err + } + } + return nil +} + +// Newlines returns a span just containing some newlines. +func Newlines(amt int) Span { + return &lines{amountBefore: amt} +} + +// Line returns a span that emits a newline, followed by the given content. +func Line(content Span) Span { + return &lines{amountBefore: 1, content: content} +} + +var ( + columnPadding = []byte(" ") + indentPadding = []byte("\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t") + linesPadding = []byte("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n") +) + +// writePadding writes out padding of the given type in the given amount to the writer. +// Each byte in the padding buffer contributes 1 to the amount -- the padding being +// a buffer is just for efficiency. +func writePadding(out io.Writer, typ []byte, amt int) error { + if amt <= len(typ) { + _, err := out.Write(typ[:amt]) + return err + } + + num := amt / len(typ) + rem := amt % len(typ) + for i := 0; i < num; i++ { + if _, err := out.Write(typ); err != nil { + return err + } + } + + if _, err := out.Write(typ[:rem]); err != nil { + return err + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go new file mode 100644 index 0000000000..5a0b4752af --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pretty + +// TableCalculator calculates column widths (with optional padding) +// for a table based on the maximum required column width. +type TableCalculator struct { + cellSizesByCol [][]int + + Padding int + MaxWidth int +} + +// AddRowSizes registers a new row with cells of the given sizes. +func (c *TableCalculator) AddRowSizes(cellSizes ...int) { + if len(cellSizes) > len(c.cellSizesByCol) { + for range cellSizes[len(c.cellSizesByCol):] { + c.cellSizesByCol = append(c.cellSizesByCol, []int(nil)) + } + } + for i, size := range cellSizes { + c.cellSizesByCol[i] = append(c.cellSizesByCol[i], size) + } +} + +// ColumnWidths calculates the appropriate column sizes given the +// previously registered rows. +func (c *TableCalculator) ColumnWidths() []int { + maxColWidths := make([]int, len(c.cellSizesByCol)) + + for colInd, cellSizes := range c.cellSizesByCol { + max := 0 + for _, cellSize := range cellSizes { + if max < cellSize { + max = cellSize + } + } + maxColWidths[colInd] = max + } + + actualMaxWidth := c.MaxWidth - c.Padding + for i, width := range maxColWidths { + if actualMaxWidth > 0 && width > actualMaxWidth { + maxColWidths[i] = actualMaxWidth + } + maxColWidths[i] += c.Padding + } + + return maxColWidths +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go new file mode 100644 index 0000000000..53c923e34b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package help + +import ( + "strings" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// SortGroup knows how to sort and group marker definitions. +type SortGroup interface { + // Less is equivalent to the Less function from sort, and is used to sort the markers. + Less(*markers.Definition, *markers.Definition) bool + // Group returns the "group" that a given marker belongs to. + Group(*markers.Definition, *markers.DefinitionHelp) string +} + +var ( + // SortByCategory sorts the markers by name and groups them by their help category. + SortByCategory = sortByCategory{} + + // SortByOption sorts by the generator that the option belongs to. 
+ SortByOption = optionsSort{} +) + +type sortByCategory struct{} + +func (sortByCategory) Group(_ *markers.Definition, help *markers.DefinitionHelp) string { + if help == nil { + return "" + } + return help.Category +} +func (sortByCategory) Less(i, j *markers.Definition) bool { + return i.Name < j.Name +} + +type optionsSort struct{} + +func (optionsSort) Less(i, j *markers.Definition) bool { + iParts := strings.Split(i.Name, ":") + jParts := strings.Split(j.Name, ":") + + iGen := "" + iRule := "" + jGen := "" + jRule := "" + + switch len(iParts) { + case 1: + iGen = iParts[0] + // two means a default output rule, so ignore + case 2: + iRule = iParts[1] + case 3: + iGen = iParts[1] + iRule = iParts[2] + } + switch len(jParts) { + case 1: + jGen = jParts[0] + // two means a default output rule, so ignore + case 2: + jRule = jParts[1] + case 3: + jGen = jParts[1] + jRule = jParts[2] + } + + if iGen != jGen { + return iGen > jGen + } + + return iRule < jRule +} +func (optionsSort) Group(def *markers.Definition, _ *markers.DefinitionHelp) string { + parts := strings.Split(def.Name, ":") + + switch len(parts) { + case 1: + if parts[0] == "paths" { + return "generic" + } + return "generators" + case 2: + return "output rules (optionally as output::...)" + default: + return "" + // three means a marker-specific output rule, ignore + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go new file mode 100644 index 0000000000..be11104324 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go @@ -0,0 +1,215 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package help + +import ( + "sort" + "strings" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// DetailedHelp contains both a summary and further details. +type DetailedHelp struct { + // Summary contains a one-line description. + Summary string `json:"summary"` + // Details contains further information. + Details string `json:"details,omitempty"` +} + +// Argument is the type data for a marker argument. +type Argument struct { + // Type is the data type of the argument (string, bool, int, slice, any, raw, invalid) + Type string `json:"type"` + // Optional marks this argument as optional. + Optional bool `json:"optional"` + // ItemType contains the type of the slice item, if this is a slice + ItemType *Argument `json:"itemType,omitempty"` +} + +func (a Argument) typeString(out *strings.Builder) { + if a.Type == "slice" { + out.WriteString("[]") + a.ItemType.typeString(out) + return + } + + out.WriteString(a.Type) +} + +// TypeString returns a string roughly equivalent +// (but not identical) to the underlying Go type that +// this argument would parse to. It's mainly useful +// for user-friendly formatting of this argument (e.g. +// help strings). 
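// Illustrative aside (not part of the vendored upstream file): for example,
//
//	Argument{Type: "slice", ItemType: &Argument{Type: "string"}}.TypeString()
//
// yields "[]string", while Argument{Type: "int"}.TypeString() yields "int".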
+func (a Argument) TypeString() string { + out := &strings.Builder{} + a.typeString(out) + return out.String() +} + +// FieldHelp contains information required to print documentation for a marker field. +type FieldHelp struct { + // Name is the field name. + Name string `json:"name"` + // Argument is the type of the field. + Argument `json:",inline"` + + // DetailedHelp contains the textual help for the field. + DetailedHelp `json:",inline"` +} + +// MarkerDoc contains information required to print documentation for a marker. +type MarkerDoc struct { + // definition + + // Name is the name of the marker. + Name string `json:"name"` + // Target is the target (field, package, type) of the marker. + Target string `json:"target"` + + // help + + // DetailedHelp is the textual help for the marker. + DetailedHelp `json:",inline"` + // Category is the general "category" that this marker belongs to. + Category string `json:"category"` + // DeprecatedInFavorOf marks that this marker shouldn't be used when + // non-nil. If also non-empty, another marker should be used instead. + DeprecatedInFavorOf *string `json:"deprecatedInFavorOf,omitempty"` + // Fields is the type and help data for each field of this marker. + Fields []FieldHelp `json:"fields,omitempty"` +} + +// Empty checks if this marker has any arguments, returning true if not. +func (m MarkerDoc) Empty() bool { + return len(m.Fields) == 0 +} + +// AnonymousField chekcs if this is an single-valued marker +// (as opposed to having named fields). +func (m MarkerDoc) AnonymousField() bool { + return len(m.Fields) == 1 && m.Fields[0].Name == "" +} + +// ForArgument returns the equivalent documentation for a marker argument. +func ForArgument(argRaw markers.Argument) Argument { + res := Argument{ + Optional: argRaw.Optional, + } + + if argRaw.ItemType != nil { + itemType := ForArgument(*argRaw.ItemType) + res.ItemType = &itemType + } + + switch argRaw.Type { + case markers.IntType: + res.Type = "int" + case markers.StringType: + res.Type = "string" + case markers.BoolType: + res.Type = "bool" + case markers.AnyType: + res.Type = "any" + case markers.SliceType: + res.Type = "slice" + case markers.RawType: + res.Type = "raw" + case markers.InvalidType: + res.Type = "invalid" + } + + return res +} + +// ForDefinition returns the equivalent marker documentation for a given marker definition and spearate help. +func ForDefinition(defn *markers.Definition, maybeHelp *markers.DefinitionHelp) MarkerDoc { + var help markers.DefinitionHelp + if maybeHelp != nil { + help = *maybeHelp + } + + res := MarkerDoc{ + Name: defn.Name, + Category: help.Category, + DeprecatedInFavorOf: help.DeprecatedInFavorOf, + Target: defn.Target.String(), + DetailedHelp: DetailedHelp{Summary: help.Summary, Details: help.Details}, + } + + helpByField := help.FieldsHelp(defn) + + // TODO(directxman12): deterministic ordering + for fieldName, fieldHelpRaw := range helpByField { + fieldInfo := defn.Fields[fieldName] + fieldHelp := FieldHelp{ + Name: fieldName, + DetailedHelp: DetailedHelp{Summary: fieldHelpRaw.Summary, Details: fieldHelpRaw.Details}, + Argument: ForArgument(fieldInfo), + } + + res.Fields = append(res.Fields, fieldHelp) + } + + sort.Slice(res.Fields, func(i, j int) bool { return res.Fields[i].Name < res.Fields[j].Name }) + + return res +} + +// CategoryDoc contains help information for all markers in a Category. 
+type CategoryDoc struct { + Category string `json:"category"` + Markers []MarkerDoc `json:"markers"` +} + +// ByCategory returns the marker help for markers in the given +// registry, grouped and sorted according to the given method. +func ByCategory(reg *markers.Registry, sorter SortGroup) []CategoryDoc { + groupedMarkers := make(map[string][]*markers.Definition) + + for _, marker := range reg.AllDefinitions() { + group := sorter.Group(marker, reg.HelpFor(marker)) + groupedMarkers[group] = append(groupedMarkers[group], marker) + } + allGroups := make([]string, 0, len(groupedMarkers)) + for groupName := range groupedMarkers { + allGroups = append(allGroups, groupName) + } + + sort.Strings(allGroups) + + res := make([]CategoryDoc, len(allGroups)) + for i, groupName := range allGroups { + markers := groupedMarkers[groupName] + sort.Slice(markers, func(i, j int) bool { + return sorter.Less(markers[i], markers[j]) + }) + + markerDocs := make([]MarkerDoc, len(markers)) + for i, marker := range markers { + markerDocs[i] = ForDefinition(marker, reg.HelpFor(marker)) + } + + res[i] = CategoryDoc{ + Category: groupName, + Markers: markerDocs, + } + } + + return res +} diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go similarity index 50% rename from vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner.go rename to vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go index a35753ac9d..46e191c0c2 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/cmdrunner/cmd_runner.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,26 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cmdrunner +package genall import ( - "os/exec" + "io" + "os" ) -// Runner has one method that executes a command and returns stdout and stderr. -type Runner interface { - CombinedOutput(cmd string, args ...string) (output string, err error) +// InputRule describes how to load non-code boilerplate artifacts. +// It's not used for loading code. +type InputRule interface { + // OpenForRead opens the given non-code artifact for reading. + OpenForRead(path string) (io.ReadCloser, error) } +type inputFromFileSystem struct{} -type realRunner struct { +func (inputFromFileSystem) OpenForRead(path string) (io.ReadCloser, error) { + return os.Open(path) } -// New returns a command runner. -func New() *realRunner { // nolint - return &realRunner{} -} - -func (runner *realRunner) CombinedOutput(cmd string, args ...string) (string, error) { - output, err := exec.Command(cmd, args...).CombinedOutput() - return string(output), err -} +// InputFromFileSystem reads from the filesystem as normal. +var InputFromFileSystem = inputFromFileSystem{} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go new file mode 100644 index 0000000000..613d19e6bb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genall + +import ( + "fmt" + "strings" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +var ( + InputPathsMarker = markers.Must(markers.MakeDefinition("paths", markers.DescribesPackage, InputPaths(nil))) +) + +// +controllertools:marker:generateHelp:category="" + +// InputPaths represents paths and go-style path patterns to use as package roots. +type InputPaths []string + +// RegisterOptionsMarkers registers "mandatory" options markers for FromOptions into the given registry. +// At this point, that's just InputPaths. +func RegisterOptionsMarkers(into *markers.Registry) error { + if err := into.Register(InputPathsMarker); err != nil { + return err + } + // NB(directxman12): we make this optional so we don't have a bootstrap problem with helpgen + if helpGiver, hasHelp := ((interface{})(InputPaths(nil))).(HasHelp); hasHelp { + into.AddHelp(InputPathsMarker, helpGiver.Help()) + } + return nil +} + +// RegistryFromOptions produces just the marker registry that would be used by FromOptions, without +// attempting to produce a full Runtime. This can be useful if you want to display help without +// trying to load roots. +func RegistryFromOptions(optionsRegistry *markers.Registry, options []string) (*markers.Registry, error) { + protoRt, err := protoFromOptions(optionsRegistry, options) + if err != nil { + return nil, err + } + reg := &markers.Registry{} + if err := protoRt.Generators.RegisterMarkers(reg); err != nil { + return nil, err + } + return reg, nil +} + +// FromOptions parses the options from markers stored in the given registry out into a runtime. +// The markers in the registry must be either +// +// a) Generators +// b) OutputRules +// c) InputPaths +// +// The paths specified in InputPaths are loaded as package roots, and the combined with +// the generators and the specified output rules to produce a runtime that can be run or +// further modified. Not default generators are used if none are specified -- you can check +// the output and rerun for that. +func FromOptions(optionsRegistry *markers.Registry, options []string) (*Runtime, error) { + + protoRt, err := protoFromOptions(optionsRegistry, options) + if err != nil { + return nil, err + } + + // make the runtime + genRuntime, err := protoRt.Generators.ForRoots(protoRt.Paths...) + if err != nil { + return nil, err + } + + // attempt to figure out what the user wants without a lot of verbose specificity: + // if the user specifies a default rule, assume that they probably want to fall back + // to that. Otherwise, assume that they just wanted to customize one option from the + // set, and leave the rest in the standard configuration. + if protoRt.OutputRules.Default != nil { + genRuntime.OutputRules = protoRt.OutputRules + return genRuntime, nil + } + + outRules := DirectoryPerGenerator("config", protoRt.GeneratorsByName) + for gen, rule := range protoRt.OutputRules.ByGenerator { + outRules.ByGenerator[gen] = rule + } + + genRuntime.OutputRules = outRules + return genRuntime, nil +} + +// protoFromOptions returns a proto-Runtime from the given options registry and +// options set. 
This can then be used to construct an actual Runtime. See the +// FromOptions function for more details about how the options work. +func protoFromOptions(optionsRegistry *markers.Registry, options []string) (protoRuntime, error) { + var gens Generators + rules := OutputRules{ + ByGenerator: make(map[Generator]OutputRule), + } + var paths []string + + // collect the generators first, so that we can key the output on the actual + // generator, which matters if there's settings in the gen object and it's not a pointer. + outputByGen := make(map[string]OutputRule) + gensByName := make(map[string]Generator) + + for _, rawOpt := range options { + if rawOpt[0] != '+' { + rawOpt = "+" + rawOpt // add a `+` to make it acceptable for usage with the registry + } + defn := optionsRegistry.Lookup(rawOpt, markers.DescribesPackage) + if defn == nil { + return protoRuntime{}, fmt.Errorf("unknown option %q", rawOpt[1:]) + } + + val, err := defn.Parse(rawOpt) + if err != nil { + return protoRuntime{}, fmt.Errorf("unable to parse option %q: %v", rawOpt[1:], err) + } + + switch val := val.(type) { + case Generator: + gens = append(gens, val) + gensByName[defn.Name] = val + case OutputRule: + _, genName := splitOutputRuleOption(defn.Name) + if genName == "" { + // it's a default rule + rules.Default = val + continue + } + + outputByGen[genName] = val + continue + case InputPaths: + paths = append(paths, val...) + default: + return protoRuntime{}, fmt.Errorf("unknown option marker %q", defn.Name) + } + } + + // actually associate the rules now that we know the generators + for genName, outputRule := range outputByGen { + gen, knownGen := gensByName[genName] + if !knownGen { + return protoRuntime{}, fmt.Errorf("non-invoked generator %q", genName) + } + + rules.ByGenerator[gen] = outputRule + } + + return protoRuntime{ + Paths: paths, + Generators: Generators(gens), + OutputRules: rules, + GeneratorsByName: gensByName, + }, nil +} + +// protoRuntime represents the raw pieces needed to compose a runtime, as +// parsed from some options. +type protoRuntime struct { + Paths []string + Generators Generators + OutputRules OutputRules + GeneratorsByName map[string]Generator +} + +// splitOutputRuleOption splits a marker name of "output:rule:gen" or "output:rule" +// into its compontent rule and generator name. +func splitOutputRuleOption(name string) (ruleName string, genName string) { + parts := strings.SplitN(name, ":", 3) + if len(parts) == 3 { + // output:: + return parts[2], parts[1] + } + // output: + return parts[1], "" +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go new file mode 100644 index 0000000000..a61fe9d7e1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go @@ -0,0 +1,157 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genall + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// nopCloser is a WriteCloser whose Close +// is a no-op. +type nopCloser struct { + io.Writer +} + +func (n nopCloser) Close() error { + return nil +} + +// DirectoryPerGenerator produces output rules mapping output to a different subdirectory +// of the given base directory for each generator (with each subdirectory specified as +// the key in the input map). +func DirectoryPerGenerator(base string, generators map[string]Generator) OutputRules { + rules := OutputRules{ + Default: OutputArtifacts{Config: OutputToDirectory(base)}, + ByGenerator: make(map[Generator]OutputRule, len(generators)), + } + + for name, gen := range generators { + rules.ByGenerator[gen] = OutputArtifacts{ + Config: OutputToDirectory(filepath.Join(base, name)), + } + } + + return rules +} + +// OutputRules defines how to output artificats on a per-generator basis. +type OutputRules struct { + // Default is the output rule used when no specific per-generator overrides match. + Default OutputRule + // ByGenerator contains specific per-generator overrides. + ByGenerator map[Generator]OutputRule +} + +// ForGenerator returns the output rule that should be used +// by the given Generator. +func (o OutputRules) ForGenerator(gen Generator) OutputRule { + if forGen, specific := o.ByGenerator[gen]; specific { + return forGen + } + return o.Default +} + +// OutputRule defines how to output artifacts from a generator. +type OutputRule interface { + // Open opens the given artifact path for writing. If a package is passed, + // the artifact is considered to be used as part of the package (e.g. + // generated code), while a nil package indicates that the artifact is + // config (or something else not involved in Go compilation). + Open(pkg *loader.Package, path string) (io.WriteCloser, error) +} + +// OutputToNothing skips outputting anything. +var OutputToNothing = outputToNothing{} + +// +controllertools:marker:generateHelp:category="" + +// outputToNothing skips outputting anything. +type outputToNothing struct{} + +func (o outputToNothing) Open(_ *loader.Package, _ string) (io.WriteCloser, error) { + return nopCloser{ioutil.Discard}, nil +} + +// +controllertools:marker:generateHelp:category="" + +// OutputToDirectory outputs each artifact to the given directory, regardless +// of if it's package-associated or not. +type OutputToDirectory string + +func (o OutputToDirectory) Open(_ *loader.Package, itemPath string) (io.WriteCloser, error) { + // ensure the directory exists + if err := os.MkdirAll(string(o), os.ModePerm); err != nil { + return nil, err + } + path := filepath.Join(string(o), itemPath) + return os.Create(path) +} + +// OutputToStdout outputs everything to standard-out, with no separation. +// +// Generally useful for single-artifact outputs. +var OutputToStdout = outputToStdout{} + +// +controllertools:marker:generateHelp:category="" + +// outputToStdout outputs everything to standard-out, with no separation. +// +// Generally useful for single-artifact outputs. +type outputToStdout struct{} + +func (o outputToStdout) Open(_ *loader.Package, itemPath string) (io.WriteCloser, error) { + return nopCloser{os.Stdout}, nil +} + +// +controllertools:marker:generateHelp:category="" + +// OutputArtifacts outputs artifacts to different locations, depending on +// whether they're package-associated or not. 
+// +// Non-package associated artifacts +// are output to the Config directory, while package-associated ones are output +// to their package's source files' directory, unless an alternate path is +// specified in Code. +type OutputArtifacts struct { + // Config points to the directory to which to write configuration. + Config OutputToDirectory + // Code overrides the directory in which to write new code (defaults to where the existing code lives). + Code OutputToDirectory `marker:",optional"` +} + +func (o OutputArtifacts) Open(pkg *loader.Package, itemPath string) (io.WriteCloser, error) { + if pkg == nil { + return o.Config.Open(pkg, itemPath) + } + + if o.Code != "" { + return o.Code.Open(pkg, itemPath) + } + + if len(pkg.CompiledGoFiles) == 0 { + return nil, fmt.Errorf("cannot output to a package with no path on disk") + } + outDir := filepath.Dir(pkg.CompiledGoFiles[0]) + outPath := filepath.Join(outDir, itemPath) + return os.Create(outPath) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go new file mode 100644 index 0000000000..ca22025c4b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go @@ -0,0 +1,89 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package genall + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (InputPaths) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "represents paths and go-style path patterns to use as package roots.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (OutputArtifacts) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "outputs artifacts to different locations, depending on whether they're package-associated or not. 
", + Details: " Non-package associated artifacts are output to the Config directory, while package-associated ones are output to their package's source files' directory, unless an alternate path is specified in Code.", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Config": markers.DetailedHelp{ + Summary: "points to the directory to which to write configuration.", + Details: "", + }, + "Code": markers.DetailedHelp{ + Summary: "overrides the directory in which to write new code (defaults to where the existing code lives).", + Details: "", + }, + }, + } +} + +func (OutputToDirectory) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "outputs each artifact to the given directory, regardless of if it's package-associated or not.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (outputToNothing) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "skips outputting anything.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (outputToStdout) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "outputs everything to standard-out, with no separation. ", + Details: " Generally useful for single-artifact outputs.", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go deleted file mode 100644 index c953b4b3b6..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go +++ /dev/null @@ -1,287 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package parse - -import ( - "fmt" - "path" - "path/filepath" - "strings" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/types" - "sigs.k8s.io/controller-tools/pkg/internal/codegen" -) - -type genUnversionedType struct { - Type *types.Type - Resource *codegen.APIResource -} - -func (b *APIs) parseAPIs() { - apis := &codegen.APIs{ - Domain: b.Domain, - Package: b.APIsPkg, - Groups: map[string]*codegen.APIGroup{}, - Rules: b.Rules, - Informers: b.Informers, - } - - for group, versionMap := range b.ByGroupVersionKind { - apiGroup := &codegen.APIGroup{ - Group: group, - GroupTitle: strings.Title(group), - Domain: b.Domain, - Versions: map[string]*codegen.APIVersion{}, - UnversionedResources: map[string]*codegen.APIResource{}, - } - - for version, kindMap := range versionMap { - apiVersion := &codegen.APIVersion{ - Domain: b.Domain, - Group: group, - Version: version, - Resources: map[string]*codegen.APIResource{}, - } - for kind, resource := range kindMap { - apiResource := &codegen.APIResource{ - Domain: resource.Domain, - Version: resource.Version, - Group: resource.Group, - Resource: resource.Resource, - Type: resource.Type, - REST: resource.REST, - Kind: resource.Kind, - Subresources: resource.Subresources, - StatusStrategy: resource.StatusStrategy, - Strategy: resource.Strategy, - NonNamespaced: resource.NonNamespaced, - ShortName: resource.ShortName, - } - parseDoc(resource, apiResource) - apiVersion.Resources[kind] = apiResource - // Set the package for the api version - apiVersion.Pkg = b.context.Universe[resource.Type.Name.Package] - // Set the package for the api group - apiGroup.Pkg = b.context.Universe[filepath.Dir(resource.Type.Name.Package)] - if apiGroup.Pkg != nil { - apiGroup.PkgPath = apiGroup.Pkg.Path - } - - apiGroup.UnversionedResources[kind] = apiResource - } - - apiGroup.Versions[version] = apiVersion - } - b.parseStructs(apiGroup) - apis.Groups[group] = apiGroup - } - apis.Pkg = b.context.Universe[b.APIsPkg] - b.APIs = apis -} - -func (b *APIs) parseStructs(apigroup *codegen.APIGroup) { - remaining := []genUnversionedType{} - for _, version := range apigroup.Versions { - for _, resource := range version.Resources { - remaining = append(remaining, genUnversionedType{resource.Type, resource}) - } - } - for _, version := range b.SubByGroupVersionKind[apigroup.Group] { - for _, kind := range version { - remaining = append(remaining, genUnversionedType{kind, nil}) - } - } - - done := sets.String{} - for len(remaining) > 0 { - // Pop the next element from the list - next := remaining[0] - remaining[0] = remaining[len(remaining)-1] - remaining = remaining[:len(remaining)-1] - - // Already processed this type. 
Skip it - if done.Has(next.Type.Name.Name) { - continue - } - done.Insert(next.Type.Name.Name) - - // Generate the struct and append to the list - result, additionalTypes := parseType(next.Type) - - // This is a resource, so generate the client - if b.genClient(next.Type) { - result.GenClient = true - result.GenDeepCopy = true - } - - if next.Resource != nil { - result.NonNamespaced = IsNonNamespaced(next.Type) - } - - if b.genDeepCopy(next.Type) { - result.GenDeepCopy = true - } - apigroup.Structs = append(apigroup.Structs, result) - - // Add the newly discovered subtypes - for _, at := range additionalTypes { - remaining = append(remaining, genUnversionedType{at, nil}) - } - } -} - -// parseType parses the type into a Struct, and returns a list of types that -// need to be parsed -func parseType(t *types.Type) (*codegen.Struct, []*types.Type) { - remaining := []*types.Type{} - - s := &codegen.Struct{ - Name: t.Name.Name, - GenClient: false, - GenUnversioned: true, // Generate unversioned structs by default - } - - for _, c := range t.CommentLines { - if strings.Contains(c, "+genregister:unversioned=false") { - // Don't generate the unversioned struct - s.GenUnversioned = false - } - } - - for _, member := range t.Members { - uType := member.Type.Name.Name - memberName := member.Name - uImport := "" - - // Use the element type for Pointers, Maps and Slices - mSubType := member.Type - hasElem := false - for mSubType.Elem != nil { - mSubType = mSubType.Elem - hasElem = true - } - if hasElem { - // Strip the package from the field type - uType = strings.Replace(member.Type.String(), mSubType.Name.Package+".", "", 1) - } - - base := filepath.Base(member.Type.String()) - samepkg := t.Name.Package == mSubType.Name.Package - - // If not in the same package, calculate the import pkg - if !samepkg { - parts := strings.Split(base, ".") - if len(parts) > 1 { - // Don't generate unversioned types for core types, just use the versioned types - if strings.HasPrefix(mSubType.Name.Package, "k8s.io/api/") { - // Import the package under an alias so it doesn't conflict with other groups - // having the same version - importAlias := path.Base(path.Dir(mSubType.Name.Package)) + path.Base(mSubType.Name.Package) - uImport = fmt.Sprintf("%s \"%s\"", importAlias, mSubType.Name.Package) - if hasElem { - // Replace the full package with the alias when referring to the type - uType = strings.Replace(member.Type.String(), mSubType.Name.Package, importAlias, 1) - } else { - // Replace the full package with the alias when referring to the type - uType = fmt.Sprintf("%s.%s", importAlias, parts[1]) - } - } else { - switch member.Type.Name.Package { - case "k8s.io/apimachinery/pkg/apis/meta/v1": - // Use versioned types for meta/v1 - uImport = fmt.Sprintf("%s \"%s\"", "metav1", "k8s.io/apimachinery/pkg/apis/meta/v1") - uType = "metav1." + parts[1] - default: - // Use unversioned types for everything else - t := member.Type - - if t.Elem != nil { - // handle Pointers, Maps, Slices - - // We need to parse the package from the Type String - t = t.Elem - str := member.Type.String() - startPkg := strings.LastIndexAny(str, "*]") - endPkg := strings.LastIndexAny(str, ".") - pkg := str[startPkg+1 : endPkg] - name := str[endPkg+1:] - prefix := str[:startPkg+1] - - uImportBase := path.Base(pkg) - uImportName := path.Base(path.Dir(pkg)) + uImportBase - uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) - - uType = prefix + uImportName + "." 
+ name - } else { - // handle non- Pointer, Maps, Slices - pkg := t.Name.Package - name := t.Name.Name - - // Come up with the alias the package is imported under - // Concatenate with directory package to reduce naming collisions - uImportBase := path.Base(pkg) - uImportName := path.Base(path.Dir(pkg)) + uImportBase - - // Create the import statement - uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg) - - // Create the field type name - should be . - uType = uImportName + "." + name - } - } - } - } - } - - if member.Embedded { - memberName = "" - } - - s.Fields = append(s.Fields, &codegen.Field{ - Name: memberName, - VersionedPackage: member.Type.Name.Package, - UnversionedImport: uImport, - UnversionedType: uType, - }) - - // Add this member Type for processing if it isn't a primitive and - // is part of the same API group - if !mSubType.IsPrimitive() && GetGroup(mSubType) == GetGroup(t) { - remaining = append(remaining, mSubType) - } - } - return s, remaining -} - -func (b *APIs) genClient(c *types.Type) bool { - comments := Comments(c.CommentLines) - resource := comments.getTag("resource", ":") + comments.getTag("kubebuilder:resource", ":") - return len(resource) > 0 -} - -func (b *APIs) genDeepCopy(c *types.Type) bool { - comments := Comments(c.CommentLines) - return comments.hasTag("subresource-request") -} - -func parseDoc(resource, apiResource *codegen.APIResource) { - if HasDocAnnotation(resource.Type) { - resource.DocAnnotation = getDocAnnotation(resource.Type, "warning", "note") - apiResource.DocAnnotation = resource.DocAnnotation - } -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go deleted file mode 100644 index 98493540f9..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/parser" -) - -// NewContext returns a new Context from the builder -func NewContext(p *parser.Builder) (*generator.Context, error) { - return generator.NewContext(p, NameSystems(), DefaultNameSystem()) -} - -// DefaultNameSystem returns public by default. -func DefaultNameSystem() string { - return "public" -} - -// NameSystems returns the name system used by the generators in this package. -// e.g. black-magic -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "public": namer.NewPublicNamer(1), - "raw": namer.NewRawNamer("", nil), - } -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go deleted file mode 100644 index a03f6bb82a..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go +++ /dev/null @@ -1,639 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "regexp" - "strconv" - "strings" - "text/template" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/types" -) - -// parseCRDs populates the CRD field of each Group.Version.Resource, -// creating validations using the annotations on type fields. -func (b *APIs) parseCRDs() { - for _, group := range b.APIs.Groups { - for _, version := range group.Versions { - for _, resource := range version.Resources { - if IsAPIResource(resource.Type) { - resource.JSONSchemaProps, resource.Validation = - b.typeToJSONSchemaProps(resource.Type, sets.NewString(), []string{}, true) - - // Note: Drop the Type field at the root level of validation - // schema. Refer to following issue for details. - // https://github.com/kubernetes/kubernetes/issues/65293 - resource.JSONSchemaProps.Type = "" - j, err := json.MarshalIndent(resource.JSONSchemaProps, "", " ") - if err != nil { - log.Fatalf("Could not Marshall validation %v\n", err) - } - resource.ValidationComments = string(j) - - resource.CRD = v1beta1.CustomResourceDefinition{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "apiextensions.k8s.io/v1beta1", - Kind: "CustomResourceDefinition", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s.%s.%s", resource.Resource, resource.Group, resource.Domain), - Labels: map[string]string{"controller-tools.k8s.io": "1.0"}, - }, - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: fmt.Sprintf("%s.%s", resource.Group, resource.Domain), - Version: resource.Version, - Names: v1beta1.CustomResourceDefinitionNames{ - Kind: resource.Kind, - Plural: resource.Resource, - }, - Validation: &v1beta1.CustomResourceValidation{ - OpenAPIV3Schema: &resource.JSONSchemaProps, - }, - }, - } - if resource.NonNamespaced { - resource.CRD.Spec.Scope = "Cluster" - } else { - resource.CRD.Spec.Scope = "Namespaced" - } - - if hasCategories(resource.Type) { - categoriesTag := getCategoriesTag(resource.Type) - categories := strings.Split(categoriesTag, ",") - resource.CRD.Spec.Names.Categories = categories - resource.Categories = categories - } - - if hasSingular(resource.Type) { - singularName := getSingularName(resource.Type) - resource.CRD.Spec.Names.Singular = singularName - } - - if hasStatusSubresource(resource.Type) { - if resource.CRD.Spec.Subresources == nil { - resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} - } - resource.CRD.Spec.Subresources.Status = &v1beta1.CustomResourceSubresourceStatus{} - } - - resource.CRD.Status.Conditions = []v1beta1.CustomResourceDefinitionCondition{} - resource.CRD.Status.StoredVersions = []string{} - - if hasScaleSubresource(resource.Type) { - if resource.CRD.Spec.Subresources == nil { - resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{} - } - jsonPath, err := parseScaleParams(resource.Type) - if err != nil { - log.Fatalf("failed in parsing 
CRD, error: %v", err.Error()) - } - resource.CRD.Spec.Subresources.Scale = &v1beta1.CustomResourceSubresourceScale{ - SpecReplicasPath: jsonPath[specReplicasPath], - StatusReplicasPath: jsonPath[statusReplicasPath], - } - labelSelctor, ok := jsonPath[labelSelectorPath] - if ok && labelSelctor != "" { - resource.CRD.Spec.Subresources.Scale.LabelSelectorPath = &labelSelctor - } - } - if hasPrintColumn(resource.Type) { - result, err := parsePrintColumnParams(resource.Type) - if err != nil { - log.Fatalf("failed to parse printcolumn annotations, error: %v", err.Error()) - } - resource.CRD.Spec.AdditionalPrinterColumns = result - } - if len(resource.ShortName) > 0 { - resource.CRD.Spec.Names.ShortNames = strings.Split(resource.ShortName, ";") - } - } - } - } - } -} - -func (b *APIs) getTime() string { - return `v1beta1.JSONSchemaProps{ - Type: "string", - Format: "date-time", -}` -} - -func (b *APIs) getDuration() string { - return `v1beta1.JSONSchemaProps{ - Type: "string", -}` -} - -func (b *APIs) getQuantity() string { - return `v1beta1.JSONSchemaProps{ - Type: "string", -}` -} - -func (b *APIs) objSchema() string { - return `v1beta1.JSONSchemaProps{ - Type: "object", -}` -} - -// typeToJSONSchemaProps returns a JSONSchemaProps object and its serialization -// in Go that describe the JSONSchema validations for the given type. -func (b *APIs) typeToJSONSchemaProps(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { - // Special cases - time := types.Name{Name: "Time", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} - duration := types.Name{Name: "Duration", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} - quantity := types.Name{Name: "Quantity", Package: "k8s.io/apimachinery/pkg/api/resource"} - meta := types.Name{Name: "ObjectMeta", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"} - unstructured := types.Name{Name: "Unstructured", Package: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"} - rawExtension := types.Name{Name: "RawExtension", Package: "k8s.io/apimachinery/pkg/runtime"} - intOrString := types.Name{Name: "IntOrString", Package: "k8s.io/apimachinery/pkg/util/intstr"} - // special types first - specialTypeProps := v1beta1.JSONSchemaProps{ - Description: parseDescription(comments), - } - for _, l := range comments { - getValidation(l, &specialTypeProps) - } - switch t.Name { - case time: - specialTypeProps.Type = "string" - specialTypeProps.Format = "date-time" - return specialTypeProps, b.getTime() - case duration: - specialTypeProps.Type = "string" - return specialTypeProps, b.getDuration() - case quantity: - specialTypeProps.Type = "string" - return specialTypeProps, b.getQuantity() - case meta, unstructured, rawExtension: - specialTypeProps.Type = "object" - return specialTypeProps, b.objSchema() - case intOrString: - specialTypeProps.AnyOf = []v1beta1.JSONSchemaProps{ - { - Type: "string", - }, - { - Type: "integer", - }, - } - return specialTypeProps, b.objSchema() - } - - var v v1beta1.JSONSchemaProps - var s string - switch t.Kind { - case types.Builtin: - v, s = b.parsePrimitiveValidation(t, found, comments) - case types.Struct: - v, s = b.parseObjectValidation(t, found, comments, isRoot) - case types.Map: - v, s = b.parseMapValidation(t, found, comments) - case types.Slice: - v, s = b.parseArrayValidation(t, found, comments) - case types.Array: - v, s = b.parseArrayValidation(t, found, comments) - case types.Pointer: - v, s = b.typeToJSONSchemaProps(t.Elem, found, comments, false) - case types.Alias: - v, s = 
b.typeToJSONSchemaProps(t.Underlying, found, comments, false) - default: - log.Fatalf("Unknown supported Kind %v\n", t.Kind) - } - - return v, s -} - -var jsonRegex = regexp.MustCompile("json:\"([a-zA-Z0-9,]+)\"") - -type primitiveTemplateArgs struct { - v1beta1.JSONSchemaProps - Value string - Format string - EnumValue string // TODO check type of enum value to match the type of field - Description string -} - -var primitiveTemplate = template.Must(template.New("map-template").Parse( - `v1beta1.JSONSchemaProps{ - {{ if .Pattern -}} - Pattern: "{{ .Pattern }}", - {{ end -}} - {{ if .Maximum -}} - Maximum: getFloat({{ .Maximum }}), - {{ end -}} - {{ if .ExclusiveMaximum -}} - ExclusiveMaximum: {{ .ExclusiveMaximum }}, - {{ end -}} - {{ if .Minimum -}} - Minimum: getFloat({{ .Minimum }}), - {{ end -}} - {{ if .ExclusiveMinimum -}} - ExclusiveMinimum: {{ .ExclusiveMinimum }}, - {{ end -}} - Type: "{{ .Value }}", - {{ if .Format -}} - Format: "{{ .Format }}", - {{ end -}} - {{ if .EnumValue -}} - Enum: {{ .EnumValue }}, - {{ end -}} - {{ if .MaxLength -}} - MaxLength: getInt({{ .MaxLength }}), - {{ end -}} - {{ if .MinLength -}} - MinLength: getInt({{ .MinLength }}), - {{ end -}} -}`)) - -// parsePrimitiveValidation returns a JSONSchemaProps object and its -// serialization in Go that describe the validations for the given primitive -// type. -func (b *APIs) parsePrimitiveValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { - props := v1beta1.JSONSchemaProps{Type: string(t.Name.Name)} - - for _, l := range comments { - getValidation(l, &props) - } - - buff := &bytes.Buffer{} - - var n, f, s, d string - switch t.Name.Name { - case "int", "int64", "uint64": - n = "integer" - f = "int64" - case "int32", "uint32": - n = "integer" - f = "int32" - case "float", "float32": - n = "number" - f = "float" - case "float64": - n = "number" - f = "double" - case "bool": - n = "boolean" - case "string": - n = "string" - f = props.Format - default: - n = t.Name.Name - } - if props.Enum != nil { - s = parseEnumToString(props.Enum) - } - d = parseDescription(comments) - if err := primitiveTemplate.Execute(buff, primitiveTemplateArgs{props, n, f, s, d}); err != nil { - log.Fatalf("%v", err) - } - props.Type = n - props.Format = f - props.Description = d - return props, buff.String() -} - -type mapTempateArgs struct { - Result string - SkipMapValidation bool -} - -var mapTemplate = template.Must(template.New("map-template").Parse( - `v1beta1.JSONSchemaProps{ - Type: "object", - {{if not .SkipMapValidation}}AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{ - Allows: true, - Schema: &{{.Result}}, - },{{end}} -}`)) - -// parseMapValidation returns a JSONSchemaProps object and its serialization in -// Go that describe the validations for the given map type. 
-func (b *APIs) parseMapValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { - additionalProps, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false) - additionalProps.Description = "" - props := v1beta1.JSONSchemaProps{ - Type: "object", - Description: parseDescription(comments), - } - parseOption := b.arguments.CustomArgs.(*Options) - if !parseOption.SkipMapValidation { - props.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{ - Allows: true, - Schema: &additionalProps} - } - - for _, l := range comments { - getValidation(l, &props) - } - - buff := &bytes.Buffer{} - if err := mapTemplate.Execute(buff, mapTempateArgs{Result: result, SkipMapValidation: parseOption.SkipMapValidation}); err != nil { - log.Fatalf("%v", err) - } - return props, buff.String() -} - -var arrayTemplate = template.Must(template.New("array-template").Parse( - `v1beta1.JSONSchemaProps{ - Type: "{{.Type}}", - {{ if .Format -}} - Format: "{{.Format}}", - {{ end -}} - {{ if .MaxItems -}} - MaxItems: getInt({{ .MaxItems }}), - {{ end -}} - {{ if .MinItems -}} - MinItems: getInt({{ .MinItems }}), - {{ end -}} - {{ if .UniqueItems -}} - UniqueItems: {{ .UniqueItems }}, - {{ end -}} - {{ if .Items -}} - Items: &v1beta1.JSONSchemaPropsOrArray{ - Schema: &{{.ItemsSchema}}, - }, - {{ end -}} -}`)) - -type arrayTemplateArgs struct { - v1beta1.JSONSchemaProps - ItemsSchema string -} - -// parseArrayValidation returns a JSONSchemaProps object and its serialization in -// Go that describe the validations for the given array type. -func (b *APIs) parseArrayValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) { - items, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false) - items.Description = "" - props := v1beta1.JSONSchemaProps{ - Type: "array", - Items: &v1beta1.JSONSchemaPropsOrArray{Schema: &items}, - Description: parseDescription(comments), - } - // To represent byte arrays in the generated code, the property of the OpenAPI definition - // should have string as its type and byte as its format. - if t.Name.Name == "[]byte" { - props.Type = "string" - props.Format = "byte" - props.Items = nil - props.Description = parseDescription(comments) - } - for _, l := range comments { - getValidation(l, &props) - } - if t.Name.Name != "[]byte" { - // Except for the byte array special case above, the "format" property - // should be applied to the array items and not the array itself. - props.Format = "" - } - buff := &bytes.Buffer{} - if err := arrayTemplate.Execute(buff, arrayTemplateArgs{props, result}); err != nil { - log.Fatalf("%v", err) - } - return props, buff.String() -} - -type objectTemplateArgs struct { - v1beta1.JSONSchemaProps - Fields map[string]string - Required []string - IsRoot bool -} - -var objectTemplate = template.Must(template.New("object-template").Parse( - `v1beta1.JSONSchemaProps{ - {{ if not .IsRoot -}} - Type: "object", - {{ end -}} - Properties: map[string]v1beta1.JSONSchemaProps{ - {{ range $k, $v := .Fields -}} - "{{ $k }}": {{ $v }}, - {{ end -}} - }, - {{if .Required}}Required: []string{ - {{ range $k, $v := .Required -}} - "{{ $v }}", - {{ end -}} - },{{ end -}} -}`)) - -// parseObjectValidation returns a JSONSchemaProps object and its serialization in -// Go that describe the validations for the given object type. 
-func (b *APIs) parseObjectValidation(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) { - buff := &bytes.Buffer{} - props := v1beta1.JSONSchemaProps{ - Type: "object", - Description: parseDescription(comments), - } - - for _, l := range comments { - getValidation(l, &props) - } - - if strings.HasPrefix(t.Name.String(), "k8s.io/api") { - if err := objectTemplate.Execute(buff, objectTemplateArgs{props, nil, nil, false}); err != nil { - log.Fatalf("%v", err) - } - } else { - m, result, required := b.getMembers(t, found) - props.Properties = m - props.Required = required - - if err := objectTemplate.Execute(buff, objectTemplateArgs{props, result, required, isRoot}); err != nil { - log.Fatalf("%v", err) - } - } - return props, buff.String() -} - -// getValidation parses the validation tags from the comment and sets the -// validation rules on the given JSONSchemaProps. -func getValidation(comment string, props *v1beta1.JSONSchemaProps) { - comment = strings.TrimLeft(comment, " ") - if !strings.HasPrefix(comment, "+kubebuilder:validation:") { - return - } - c := strings.Replace(comment, "+kubebuilder:validation:", "", -1) - parts := strings.Split(c, "=") - if len(parts) != 2 { - log.Fatalf("Expected +kubebuilder:validation:= actual: %s", comment) - return - } - switch parts[0] { - case "Maximum": - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - log.Fatalf("Could not parse float from %s: %v", comment, err) - return - } - props.Maximum = &f - case "ExclusiveMaximum": - b, err := strconv.ParseBool(parts[1]) - if err != nil { - log.Fatalf("Could not parse bool from %s: %v", comment, err) - return - } - props.ExclusiveMaximum = b - case "Minimum": - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - log.Fatalf("Could not parse float from %s: %v", comment, err) - return - } - props.Minimum = &f - case "ExclusiveMinimum": - b, err := strconv.ParseBool(parts[1]) - if err != nil { - log.Fatalf("Could not parse bool from %s: %v", comment, err) - return - } - props.ExclusiveMinimum = b - case "MaxLength": - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MaxLength = &v - case "MinLength": - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MinLength = &v - case "Pattern": - props.Pattern = parts[1] - case "MaxItems": - if props.Type == "array" { - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MaxItems = &v - } - case "MinItems": - if props.Type == "array" { - i, err := strconv.Atoi(parts[1]) - v := int64(i) - if err != nil { - log.Fatalf("Could not parse int from %s: %v", comment, err) - return - } - props.MinItems = &v - } - case "UniqueItems": - if props.Type == "array" { - b, err := strconv.ParseBool(parts[1]) - if err != nil { - log.Fatalf("Could not parse bool from %s: %v", comment, err) - return - } - props.UniqueItems = b - } - case "MultipleOf": - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - log.Fatalf("Could not parse float from %s: %v", comment, err) - return - } - props.MultipleOf = &f - case "Enum": - if props.Type != "array" { - value := strings.Split(parts[1], ",") - enums := []v1beta1.JSON{} - for _, s := range value { - checkType(props, s, &enums) - } - props.Enum = enums - } - case "Format": - 
props.Format = parts[1] - default: - log.Fatalf("Unsupport validation: %s", comment) - } -} - -// getMembers builds maps by field name of the JSONSchemaProps and their Go -// serializations. -func (b *APIs) getMembers(t *types.Type, found sets.String) (map[string]v1beta1.JSONSchemaProps, map[string]string, []string) { - members := map[string]v1beta1.JSONSchemaProps{} - result := map[string]string{} - required := []string{} - - // Don't allow recursion until we support it through refs - // TODO: Support recursion - if found.Has(t.Name.String()) { - fmt.Printf("Breaking recursion for type %s", t.Name.String()) - return members, result, required - } - found.Insert(t.Name.String()) - - for _, member := range t.Members { - tags := jsonRegex.FindStringSubmatch(member.Tags) - if len(tags) == 0 { - // Skip fields without json tags - //fmt.Printf("Skipping member %s %s\n", member.Name, member.Type.Name.String()) - continue - } - ts := strings.Split(tags[1], ",") - name := member.Name - strat := "" - if len(ts) > 0 && len(ts[0]) > 0 { - name = ts[0] - } - if len(ts) > 1 { - strat = ts[1] - } - - // Inline "inline" structs - if strat == "inline" { - m, r, re := b.getMembers(member.Type, found) - for n, v := range m { - members[n] = v - } - for n, v := range r { - result[n] = v - } - required = append(required, re...) - } else { - m, r := b.typeToJSONSchemaProps(member.Type, found, member.CommentLines, false) - members[name] = m - result[name] = r - if !strings.HasSuffix(strat, "omitempty") { - required = append(required, name) - } - } - } - - defer found.Delete(t.Name.String()) - return members, result, required -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go deleted file mode 100644 index d1e8c3fc03..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package parse - -import ( - "fmt" - "log" - "strings" - - "github.com/gobuffalo/flect" - - "k8s.io/gengo/types" - "sigs.k8s.io/controller-tools/pkg/internal/codegen" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -// parseIndex indexes all types with the comment "// +resource=RESOURCE" by GroupVersionKind and -// GroupKindVersion -func (b *APIs) parseIndex() { - // Index resource by group, version, kind - b.ByGroupVersionKind = map[string]map[string]map[string]*codegen.APIResource{} - - // Index resources by group, kind, version - b.ByGroupKindVersion = map[string]map[string]map[string]*codegen.APIResource{} - - // Index subresources by group, version, kind - b.SubByGroupVersionKind = map[string]map[string]map[string]*types.Type{} - - for _, c := range b.context.Order { - // The type is a subresource, add it to the subresource index - if IsAPISubresource(c) { - group := GetGroup(c) - version := GetVersion(c, group) - kind := GetKind(c, group) - if _, f := b.SubByGroupVersionKind[group]; !f { - b.SubByGroupVersionKind[group] = map[string]map[string]*types.Type{} - } - if _, f := b.SubByGroupVersionKind[group][version]; !f { - b.SubByGroupVersionKind[group][version] = map[string]*types.Type{} - } - b.SubByGroupVersionKind[group][version][kind] = c - } - - // If it isn't a subresource or resource, continue to the next type - if !IsAPIResource(c) { - continue - } - - // Parse out the resource information - r := &codegen.APIResource{ - Type: c, - NonNamespaced: IsNonNamespaced(c), - } - r.Group = GetGroup(c) - r.Version = GetVersion(c, r.Group) - r.Kind = GetKind(c, r.Group) - r.Domain = b.Domain - - // TODO: revisit the part... - if r.Resource == "" { - r.Resource = flect.Pluralize(strings.ToLower(r.Kind)) - } - rt, err := parseResourceAnnotation(c) - if err != nil { - log.Fatalf("failed to parse resource annotations, error: %v", err.Error()) - } - if rt.Resource != "" { - r.Resource = rt.Resource - } - r.ShortName = rt.ShortName - - // Copy the Status strategy to mirror the non-status strategy - r.StatusStrategy = strings.TrimSuffix(r.Strategy, "Strategy") - r.StatusStrategy = fmt.Sprintf("%sStatusStrategy", r.StatusStrategy) - - // Initialize the map entries so they aren't nill - if _, f := b.ByGroupKindVersion[r.Group]; !f { - b.ByGroupKindVersion[r.Group] = map[string]map[string]*codegen.APIResource{} - } - if _, f := b.ByGroupKindVersion[r.Group][r.Kind]; !f { - b.ByGroupKindVersion[r.Group][r.Kind] = map[string]*codegen.APIResource{} - } - if _, f := b.ByGroupVersionKind[r.Group]; !f { - b.ByGroupVersionKind[r.Group] = map[string]map[string]*codegen.APIResource{} - } - if _, f := b.ByGroupVersionKind[r.Group][r.Version]; !f { - b.ByGroupVersionKind[r.Group][r.Version] = map[string]*codegen.APIResource{} - } - - // Add the resource to the map - b.ByGroupKindVersion[r.Group][r.Kind][r.Version] = r - b.ByGroupVersionKind[r.Group][r.Version][r.Kind] = r - r.Type = c - } -} - -// resourceTags contains the tags present in a "+resource=" comment -type resourceTags struct { - Resource string - REST string - Strategy string - ShortName string -} - -// resourceAnnotationValue is a helper function to extract resource annotation. -func resourceAnnotationValue(tag string) (resourceTags, error) { - res := resourceTags{} - for _, elem := range strings.Split(tag, ",") { - key, value, err := general.ParseKV(elem) - if err != nil { - return resourceTags{}, fmt.Errorf("// +kubebuilder:resource: tags must be key value pairs. 
Expected "+ - "keys [path=] "+ - "Got string: [%s]", tag) - } - switch key { - case "path": - res.Resource = value - case "shortName": - res.ShortName = value - default: - return resourceTags{}, fmt.Errorf("The given input %s is invalid", value) - } - } - return res, nil -} - -// parseResourceAnnotation parses the tags in a "+resource=" comment into a resourceTags struct. -func parseResourceAnnotation(t *types.Type) (resourceTags, error) { - finalResult := resourceTags{} - var resourceAnnotationFound bool - for _, comment := range t.CommentLines { - anno := general.GetAnnotation(comment, "kubebuilder:resource") - if len(anno) == 0 { - continue - } - result, err := resourceAnnotationValue(anno) - if err != nil { - return resourceTags{}, err - } - if resourceAnnotationFound { - return resourceTags{}, fmt.Errorf("resource annotation should only exists once per type") - } - resourceAnnotationFound = true - finalResult = result - } - return finalResult, nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go deleted file mode 100644 index c7a55dd3de..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parse - -import ( - "bufio" - "go/build" - "log" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" - "sigs.k8s.io/controller-tools/pkg/internal/codegen" -) - -// APIs is the information of a collection of API -type APIs struct { - context *generator.Context - arguments *args.GeneratorArgs - Domain string - VersionedPkgs sets.String - UnversionedPkgs sets.String - APIsPkg string - APIsPkgRaw *types.Package - GroupNames sets.String - - APIs *codegen.APIs - Controllers []codegen.Controller - - ByGroupKindVersion map[string]map[string]map[string]*codegen.APIResource - ByGroupVersionKind map[string]map[string]map[string]*codegen.APIResource - SubByGroupVersionKind map[string]map[string]map[string]*types.Type - Groups map[string]types.Package - Rules []rbacv1.PolicyRule - Informers map[v1.GroupVersionKind]bool -} - -// NewAPIs returns a new APIs instance with given context. 
-func NewAPIs(context *generator.Context, arguments *args.GeneratorArgs, domain, apisPkg string) *APIs { - b := &APIs{ - context: context, - arguments: arguments, - Domain: domain, - APIsPkg: apisPkg, - } - b.parsePackages() - b.parseGroupNames() - b.parseIndex() - b.parseAPIs() - b.parseCRDs() - if len(b.Domain) == 0 { - b.parseDomain() - } - return b -} - -// parseGroupNames initializes b.GroupNames with the set of all groups -func (b *APIs) parseGroupNames() { - b.GroupNames = sets.String{} - for p := range b.UnversionedPkgs { - pkg := b.context.Universe[p] - if pkg == nil { - // If the input had no Go files, for example. - continue - } - b.GroupNames.Insert(filepath.Base(p)) - } -} - -// parsePackages parses out the sets of Versioned, Unversioned packages and identifies the root Apis package. -func (b *APIs) parsePackages() { - b.VersionedPkgs = sets.NewString() - b.UnversionedPkgs = sets.NewString() - for _, o := range b.context.Order { - if IsAPIResource(o) { - versioned := o.Name.Package - b.VersionedPkgs.Insert(versioned) - - unversioned := filepath.Dir(versioned) - b.UnversionedPkgs.Insert(unversioned) - } - } -} - -// parseDomain parses the domain from the apis/doc.go file comment "// +domain=YOUR_DOMAIN". -func (b *APIs) parseDomain() { - pkg := b.context.Universe[b.APIsPkg] - if pkg == nil { - // If the input had no Go files, for example. - panic(errors.Errorf("Missing apis package.")) - } - comments := Comments(pkg.Comments) - b.Domain = comments.getTag("domain", "=") - if len(b.Domain) == 0 { - b.Domain = parseDomainFromFiles(b.context.Inputs) - if len(b.Domain) == 0 { - panic("Could not find string matching // +domain=.+ in apis/doc.go") - } - } -} - -func parseDomainFromFiles(paths []string) string { - var domain string - for _, path := range paths { - if strings.HasSuffix(path, "pkg/apis") { - filePath := strings.Join([]string{build.Default.GOPATH, "src", path, "doc.go"}, "/") - lines := []string{} - - file, err := os.Open(filePath) - if err != nil { - log.Fatal(err) - } - defer file.Close() - scanner := bufio.NewScanner(file) - for scanner.Scan() { - if strings.HasPrefix(scanner.Text(), "//") { - lines = append(lines, strings.Replace(scanner.Text(), "// ", "", 1)) - } - } - if err := scanner.Err(); err != nil { - log.Fatal(err) - } - - comments := Comments(lines) - domain = comments.getTag("domain", "=") - break - } - } - return domain -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go deleted file mode 100644 index 7df4453534..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go +++ /dev/null @@ -1,539 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package parse - -import ( - "fmt" - "log" - "path/filepath" - "strconv" - "strings" - - "github.com/pkg/errors" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/gengo/types" -) - -const ( - specReplicasPath = "specpath" - statusReplicasPath = "statuspath" - labelSelectorPath = "selectorpath" - jsonPathError = "invalid scale path. specpath, statuspath key-value pairs are required, only selectorpath key-value is optinal. For example: // +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label" - printColumnName = "name" - printColumnType = "type" - printColumnDescr = "description" - printColumnPath = "JSONPath" - printColumnFormat = "format" - printColumnPri = "priority" - printColumnError = "invalid printcolumn path. name,type, and JSONPath are required kye-value pairs and rest of the fields are optinal. For example: // +kubebuilder:printcolumn:name=abc,type=string,JSONPath=status" -) - -// Options contains the parser options -type Options struct { - SkipMapValidation bool - - // SkipRBACValidation flag determines whether to check RBAC annotations - // for the controller or not at parse stage. - SkipRBACValidation bool -} - -// IsAPIResource returns true if either of the two conditions become true: -// 1. t has a +resource/+kubebuilder:resource comment tag -// 2. t has TypeMeta and ObjectMeta in its member list. -func IsAPIResource(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+resource") || strings.Contains(c, "+kubebuilder:resource") { - return true - } - } - - typeMetaFound, objMetaFound := false, false - for _, m := range t.Members { - if m.Name == "TypeMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta" { - typeMetaFound = true - } - if m.Name == "ObjectMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta" { - objMetaFound = true - } - if typeMetaFound && objMetaFound { - return true - } - } - return false -} - -// IsNonNamespaced returns true if t has a +nonNamespaced comment tag -func IsNonNamespaced(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - - for _, c := range t.CommentLines { - if strings.Contains(c, "+genclient:nonNamespaced") { - return true - } - } - - for _, c := range t.SecondClosestCommentLines { - if strings.Contains(c, "+genclient:nonNamespaced") { - return true - } - } - - return false -} - -// IsController returns true if t has a +controller or +kubebuilder:controller tag -func IsController(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+controller") || strings.Contains(c, "+kubebuilder:controller") { - return true - } - } - return false -} - -// IsRBAC returns true if t has a +rbac or +kubebuilder:rbac tag -func IsRBAC(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+rbac") || strings.Contains(c, "+kubebuilder:rbac") { - return true - } - } - return false -} - -// hasPrintColumn returns true if t has a +printcolumn or +kubebuilder:printcolumn annotation. 
-func hasPrintColumn(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+printcolumn") || strings.Contains(c, "+kubebuilder:printcolumn") { - return true - } - } - return false -} - -// IsInformer returns true if t has a +informers or +kubebuilder:informers tag -func IsInformer(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+informers") || strings.Contains(c, "+kubebuilder:informers") { - return true - } - } - return false -} - -// IsAPISubresource returns true if t has a +subresource-request comment tag -func IsAPISubresource(t *types.Type) bool { - for _, c := range t.CommentLines { - if strings.Contains(c, "+subresource-request") { - return true - } - } - return false -} - -// HasSubresource returns true if t is an APIResource with one or more Subresources -func HasSubresource(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "subresource") { - return true - } - } - return false -} - -// hasStatusSubresource returns true if t is an APIResource annotated with -// +kubebuilder:subresource:status -func hasStatusSubresource(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:subresource:status") { - return true - } - } - return false -} - -// hasScaleSubresource returns true if t is an APIResource annotated with -// +kubebuilder:subresource:scale -func hasScaleSubresource(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:subresource:scale") { - return true - } - } - return false -} - -// hasCategories returns true if t is an APIResource annotated with -// +kubebuilder:categories -func hasCategories(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:categories") { - return true - } - } - return false -} - -// HasDocAnnotation returns true if t is an APIResource with doc annotation -// +kubebuilder:doc -func HasDocAnnotation(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:doc") { - return true - } - } - return false -} - -// hasSingular returns true if t is an APIResource annotated with -// +kubebuilder:singular -func hasSingular(t *types.Type) bool { - if !IsAPIResource(t) { - return false - } - for _, c := range t.CommentLines{ - if strings.Contains(c, "+kubebuilder:singular"){ - return true - } - } - return false -} - -// IsUnversioned returns true if t is in given group, and not in versioned path. -func IsUnversioned(t *types.Type, group string) bool { - return IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) && GetGroup(t) == group -} - -// IsVersioned returns true if t is in given group, and in versioned path. -func IsVersioned(t *types.Type, group string) bool { - dir := filepath.Base(filepath.Dir(filepath.Dir(t.Name.Package))) - return IsApisDir(dir) && GetGroup(t) == group -} - -// GetVersion returns version of t. -func GetVersion(t *types.Type, group string) string { - if !IsVersioned(t, group) { - panic(errors.Errorf("Cannot get version for unversioned type %v", t.Name)) - } - return filepath.Base(t.Name.Package) -} - -// GetGroup returns group of t. 
-func GetGroup(t *types.Type) string { - return filepath.Base(GetGroupPackage(t)) -} - -// GetGroupPackage returns group package of t. -func GetGroupPackage(t *types.Type) string { - if IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) { - return t.Name.Package - } - return filepath.Dir(t.Name.Package) -} - -// GetKind returns kind of t. -func GetKind(t *types.Type, group string) string { - if !IsVersioned(t, group) && !IsUnversioned(t, group) { - panic(errors.Errorf("Cannot get kind for type not in group %v", t.Name)) - } - return t.Name.Name -} - -// IsApisDir returns true if a directory path is a Kubernetes api directory -func IsApisDir(dir string) bool { - return dir == "apis" || dir == "api" -} - -// Comments is a structure for using comment tags on go structs and fields -type Comments []string - -// GetTags returns the value for the first comment with a prefix matching "+name=" -// e.g. "+name=foo\n+name=bar" would return "foo" -func (c Comments) getTag(name, sep string) string { - for _, c := range c { - prefix := fmt.Sprintf("+%s%s", name, sep) - if strings.HasPrefix(c, prefix) { - return strings.Replace(c, prefix, "", 1) - } - } - return "" -} - -// hasTag returns true if the Comments has a tag with the given name -func (c Comments) hasTag(name string) bool { - for _, c := range c { - prefix := fmt.Sprintf("+%s", name) - if strings.HasPrefix(c, prefix) { - return true - } - } - return false -} - -// GetTags returns the value for all comments with a prefix and separator. E.g. for "name" and "=" -// "+name=foo\n+name=bar" would return []string{"foo", "bar"} -func (c Comments) getTags(name, sep string) []string { - tags := []string{} - for _, c := range c { - prefix := fmt.Sprintf("+%s%s", name, sep) - if strings.HasPrefix(c, prefix) { - tags = append(tags, strings.Replace(c, prefix, "", 1)) - } - } - return tags -} - -// getCategoriesTag returns the value of the +kubebuilder:categories tags -func getCategoriesTag(c *types.Type) string { - comments := Comments(c.CommentLines) - resource := comments.getTag("kubebuilder:categories", "=") - if len(resource) == 0 { - panic(errors.Errorf("Must specify +kubebuilder:categories comment for type %v", c.Name)) - } - return resource -} - -// getSingularName returns the value of the +kubebuilder:singular tag -func getSingularName(c *types.Type) string { - comments := Comments(c.CommentLines) - singular := comments.getTag("kubebuilder:singular", "=") - if len(singular) == 0 { - panic(errors.Errorf("Must specify a value to use with +kubebuilder:singular comment for type %v", c.Name)) - } - return singular -} - -// getDocAnnotation parse annotations of "+kubebuilder:doc:" with tags of "warning" or "doc" for control generating doc config. -// E.g. +kubebuilder:doc:warning=foo +kubebuilder:doc:note=bar -func getDocAnnotation(t *types.Type, tags ...string) map[string]string { - annotation := make(map[string]string) - for _, tag := range tags { - for _, c := range t.CommentLines { - prefix := fmt.Sprintf("+kubebuilder:doc:%s=", tag) - if strings.HasPrefix(c, prefix) { - annotation[tag] = strings.Replace(c, prefix, "", 1) - } - } - } - return annotation -} - -// parseByteValue returns the literal digital number values from a byte array -func parseByteValue(b []byte) string { - elem := strings.Join(strings.Fields(fmt.Sprintln(b)), ",") - elem = strings.TrimPrefix(elem, "[") - elem = strings.TrimSuffix(elem, "]") - return elem -} - -// parseDescription parse comments above each field in the type definition. 
-func parseDescription(res []string) string { - var temp strings.Builder - var desc string - for _, comment := range res { - if !(strings.Contains(comment, "+kubebuilder") || strings.Contains(comment, "+optional")) { - temp.WriteString(comment) - temp.WriteString(" ") - desc = strings.TrimRight(temp.String(), " ") - } - } - return desc -} - -// parseEnumToString returns a representive validated go format string from JSONSchemaProps schema -func parseEnumToString(value []v1beta1.JSON) string { - res := "[]v1beta1.JSON{" - prefix := "v1beta1.JSON{[]byte{" - for _, v := range value { - res = res + prefix + parseByteValue(v.Raw) + "}}," - } - return strings.TrimSuffix(res, ",") + "}" -} - -// check type of enum element value to match type of field -func checkType(props *v1beta1.JSONSchemaProps, s string, enums *[]v1beta1.JSON) { - - // TODO support more types check - switch props.Type { - case "int", "int64", "uint64": - if _, err := strconv.ParseInt(s, 0, 64); err != nil { - log.Fatalf("Invalid integer value [%v] for a field of integer type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "int32", "unit32": - if _, err := strconv.ParseInt(s, 0, 32); err != nil { - log.Fatalf("Invalid integer value [%v] for a field of integer32 type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "float", "float32": - if _, err := strconv.ParseFloat(s, 32); err != nil { - log.Fatalf("Invalid float value [%v] for a field of float32 type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "float64": - if _, err := strconv.ParseFloat(s, 64); err != nil { - log.Fatalf("Invalid float value [%v] for a field of float type", s) - } - *enums = append(*enums, v1beta1.JSON{Raw: []byte(fmt.Sprintf("%v", s))}) - case "string": - *enums = append(*enums, v1beta1.JSON{Raw: []byte(`"` + s + `"`)}) - } -} - -// Scale subresource requires specpath, statuspath, selectorpath key values, represents for JSONPath of -// SpecReplicasPath, StatusReplicasPath, LabelSelectorPath separately. e.g. -// +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath= -func parseScaleParams(t *types.Type) (map[string]string, error) { - jsonPath := make(map[string]string) - for _, c := range t.CommentLines { - if strings.Contains(c, "+kubebuilder:subresource:scale") { - paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1) - path := strings.Split(paths, ",") - if len(path) < 2 { - return nil, fmt.Errorf(jsonPathError) - } - for _, s := range path { - kv := strings.Split(s, "=") - if kv[0] == specReplicasPath || kv[0] == statusReplicasPath || kv[0] == labelSelectorPath { - jsonPath[kv[0]] = kv[1] - } else { - return nil, fmt.Errorf(jsonPathError) - } - } - var ok bool - _, ok = jsonPath[specReplicasPath] - if !ok { - return nil, fmt.Errorf(jsonPathError) - } - _, ok = jsonPath[statusReplicasPath] - if !ok { - return nil, fmt.Errorf(jsonPathError) - } - return jsonPath, nil - } - } - return nil, fmt.Errorf(jsonPathError) -} - -// printColumnKV parses key-value string formatted as "foo=bar" and returns key and value. 
-func printColumnKV(s string) (key, value string, err error) { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - err = fmt.Errorf("invalid key value pair") - return key, value, err - } - key, value = kv[0], kv[1] - if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { - value = value[1 : len(value)-1] - } - return key, value, err -} - -// helperPrintColumn is a helper function for the parsePrintColumnParams to compute printer columns. -func helperPrintColumn(parts string, comment string) (v1beta1.CustomResourceColumnDefinition, error) { - config := v1beta1.CustomResourceColumnDefinition{} - var count int - part := strings.Split(parts, ",") - if len(part) < 3 { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) - } - - for _, elem := range strings.Split(parts, ",") { - key, value, err := printColumnKV(elem) - if err != nil { - return v1beta1.CustomResourceColumnDefinition{}, - fmt.Errorf("//+kubebuilder:printcolumn: tags must be key value pairs.Expected "+ - "keys [name=,type=,description=,format=] "+ - "Got string: [%s]", parts) - } - if key == printColumnName || key == printColumnType || key == printColumnPath { - count++ - } - switch key { - case printColumnName: - config.Name = value - case printColumnType: - if value == "integer" || value == "number" || value == "string" || value == "boolean" || value == "date" { - config.Type = value - } else { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnType) - } - case printColumnFormat: - if config.Type == "integer" && (value == "int32" || value == "int64") { - config.Format = value - } else if config.Type == "number" && (value == "float" || value == "double") { - config.Format = value - } else if config.Type == "string" && (value == "byte" || value == "date" || value == "date-time" || value == "password") { - config.Format = value - } else { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnFormat) - } - case printColumnPath: - config.JSONPath = value - case printColumnPri: - i, err := strconv.Atoi(value) - v := int32(i) - if err != nil { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnPri) - } - config.Priority = v - case printColumnDescr: - config.Description = value - default: - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) - } - } - if count != 3 { - return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError) - } - return config, nil -} - -// printcolumn requires name,type,JSONPath fields and rest of the field are optional -// +kubebuilder:printcolumn:name=,type=,description=,JSONPath:<.spec.Name>,priority=,format= -func parsePrintColumnParams(t *types.Type) ([]v1beta1.CustomResourceColumnDefinition, error) { - result := []v1beta1.CustomResourceColumnDefinition{} - for _, comment := range t.CommentLines { - if strings.Contains(comment, "+kubebuilder:printcolumn") { - parts := strings.Replace(comment, "+kubebuilder:printcolumn:", "", -1) - res, err := helperPrintColumn(parts, comment) - if err != nil { - return []v1beta1.CustomResourceColumnDefinition{}, err - } - result = append(result, res) - } - } - return result, nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go deleted file mode 100644 index ebfaf620d0..0000000000 --- 
a/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go +++ /dev/null @@ -1,213 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package codegen - -import ( - "sort" - - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/gengo/types" -) - -// APIs is the information of a collection of API -type APIs struct { - // Domain is the domain portion of the group - e.g. k8s.io - Domain string - - // Package is the name of the root API package - e.g. github.com/my-org/my-repo/pkg/apis - Package string - - // Pkg the Package for the root API package - Pkg *types.Package - - // Groups is the list of API groups found under the apis package - Groups map[string]*APIGroup - - Rules []rbacv1.PolicyRule - - Informers map[v1.GroupVersionKind]bool -} - -// GetRules get rules of the APIs -func (apis *APIs) GetRules() []rbacv1.PolicyRule { - rules := []rbacv1.PolicyRule{} - rulesIndex := map[v1.GroupResource]sets.String{} - for _, rule := range apis.Rules { - for _, g := range rule.APIGroups { - for _, r := range rule.Resources { - gr := v1.GroupResource{ - Group: g, - Resource: r, - } - if _, found := rulesIndex[gr]; !found { - rulesIndex[gr] = sets.NewString() - } - rulesIndex[gr].Insert(rule.Verbs...) - } - } - } - for gr, v := range rulesIndex { - verbs := v.List() - sort.Strings(verbs) - rule := rbacv1.PolicyRule{ - Resources: []string{gr.Resource}, - APIGroups: []string{gr.Group}, - Verbs: verbs, - } - rules = append(rules, rule) - } - return rules -} - -// APIGroup contains information of an API group. -type APIGroup struct { - // Package is the name of the go package the api group is under - e.g. github.com/me/apiserver-helloworld/apis - Package string - // Domain is the domain portion of the group - e.g. k8s.io - Domain string - // Group is the short name of the group - e.g. mushroomkingdom - Group string - GroupTitle string - // Versions is the list of all versions for this group keyed by name - Versions map[string]*APIVersion - - UnversionedResources map[string]*APIResource - - // Structs is a list of unversioned definitions that must be generated - Structs []*Struct - Pkg *types.Package - PkgPath string -} - -// Struct contains information of a struct. -type Struct struct { - // Name is the name of the type - Name string - // genClient - GenClient bool - GenDeepCopy bool - NonNamespaced bool - - GenUnversioned bool - // Fields is the list of fields appearing in the struct - Fields []*Field -} - -// Field contains information of a field. -type Field struct { - // Name is the name of the field - Name string - // For versioned Kubernetes types, this is the versioned package - VersionedPackage string - // For versioned Kubernetes types, this is the unversioned package - UnversionedImport string - UnversionedType string -} - -// APIVersion contains information of an API version. 
-type APIVersion struct { - // Domain is the group domain - e.g. k8s.io - Domain string - // Group is the group name - e.g. mushroomkingdom - Group string - // Version is the api version - e.g. v1beta1 - Version string - // Resources is a list of resources appearing in the API version keyed by name - Resources map[string]*APIResource - // Pkg is the Package object from code-gen - Pkg *types.Package -} - -// APIResource contains information of an API resource. -type APIResource struct { - // Domain is the group domain - e.g. k8s.io - Domain string - // Group is the group name - e.g. mushroomkingdom - Group string - // Version is the api version - e.g. v1beta1 - Version string - // Kind is the resource name - e.g. PeachesCastle - Kind string - // Resource is the resource name - e.g. peachescastles - Resource string - // REST is the rest.Storage implementation used to handle requests - // This field is optional. The standard REST implementation will be used - // by default. - REST string - // Subresources is a map of subresources keyed by name - Subresources map[string]*APISubresource - // Type is the Type object from code-gen - Type *types.Type - // Strategy is name of the struct to use for the strategy - Strategy string - // Strategy is name of the struct to use for the strategy - StatusStrategy string - // NonNamespaced indicates that the resource kind is non namespaced - NonNamespaced bool - - ShortName string - - JSONSchemaProps v1beta1.JSONSchemaProps - CRD v1beta1.CustomResourceDefinition - Validation string - ValidationComments string - // DocAnnotation is a map of annotations by name for doc. e.g. warning, notes message - DocAnnotation map[string]string - // Categories is a list of categories the resource is part of. - Categories []string -} - -// APISubresource contains information of an API subresource. -type APISubresource struct { - // Domain is the group domain - e.g. k8s.io - Domain string - // Group is the group name - e.g. mushroomkingdom - Group string - // Version is the api version - e.g. v1beta1 - Version string - // Kind is the resource name - e.g. PeachesCastle - Kind string - // Resource is the resource name - e.g. peachescastles - Resource string - // Request is the subresource request type - e.g. ScaleCastle - Request string - // REST is the rest.Storage implementation used to handle requests - REST string - // Path is the subresource path - e.g. scale - Path string - - // ImportPackage is the import statement that must appear for the Request - ImportPackage string - - // RequestType is the type of the request - RequestType *types.Type - - // RESTType is the type of the request handler - RESTType *types.Type -} - -// Controller contains information of a controller. -type Controller struct { - Target schema.GroupVersionKind - Resource string - Pkg *types.Package - Repo string -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go deleted file mode 100644 index afa889e36a..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package general - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "os" - "path/filepath" - "strings" -) - -// isGoFile filters files from parsing. -func isGoFile(f os.FileInfo) bool { - // ignore non-Go or Go test files - name := f.Name() - return !f.IsDir() && - !strings.HasPrefix(name, ".") && - !strings.HasSuffix(name, "_test.go") && - strings.HasSuffix(name, ".go") -} - -// GetAnnotation extracts the annotation from comment text. -// It will return "foo" for comment "+kubebuilder:webhook:foo" . -func GetAnnotation(c, name string) string { - prefix := fmt.Sprintf("+%s:", name) - if strings.HasPrefix(c, prefix) { - return strings.TrimPrefix(c, prefix) - } - return "" -} - -// ParseKV parses key-value string formatted as "foo=bar" and returns key and value. -func ParseKV(s string) (key, value string, err error) { - kv := strings.Split(s, "=") - if len(kv) != 2 { - err = fmt.Errorf("invalid key value pair") - return key, value, err - } - key, value = kv[0], kv[1] - if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") { - value = value[1 : len(value)-1] - } - return key, value, err -} - -// ParseDir parses the Go files under given directory and parses the annotation by -// invoking the parseFn function on each comment group (multi-lines comments). -// TODO(droot): extend it to multiple dirs -func ParseDir(dir string, parseFn func(string) error) error { - fset := token.NewFileSet() - - err := filepath.Walk(dir, - func(path string, info os.FileInfo, _ error) error { - if !isGoFile(info) { - // TODO(droot): enable this output based on verbose flag - // fmt.Println("skipping non-go file", path) - return nil - } - return ParseFile(fset, path, nil, parseFn) - }) - return err -} - -// ParseFile parses given filename or content src and parses annotations by -// invoking the parseFn function on each comment group (multi-lines comments). -func ParseFile(fset *token.FileSet, filename string, src interface{}, parseFn func(string) error) error { - f, err := parser.ParseFile(fset, filename, src, parser.ParseComments) - if err != nil { - fmt.Printf("error from parse.ParseFile: %v", err) - return err - } - - // using commentMaps here because it sanitizes the comment text by removing - // comment markers, compresses newlines etc. - cmap := ast.NewCommentMap(fset, f, f.Comments) - - for _, commentGroup := range cmap.Comments() { - err = parseFn(commentGroup.Text()) - if err != nil { - fmt.Print("error when parsing annotation") - return err - } - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go new file mode 100644 index 0000000000..a80065ec74 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package loader defines helpers for loading packages from sources. It wraps +// go/packages, allow incremental loading of source code and manual control +// over which packages get type-checked. This allows for faster loading in +// cases where you don't actually care about certain imports. +// +// Because it uses go/packages, it's modules-aware, and works in both modules- +// and non-modules environments. +// +// Loading +// +// The main entrypoint for loading is LoadRoots, which traverse the package +// graph starting at the given patterns (file, package, path, or ...-wildcard, +// as one might pass to go list). Packages beyond the roots can be accessed +// via the Imports() method. Packages are initially loaded with export data +// paths, filenames, and imports. +// +// Packages are suitable for comparison, as each unique package only ever has +// one *Package object returned. +// +// Syntax and TypeChecking +// +// ASTs and type-checking information can be loaded with NeedSyntax and +// NeedTypesInfo, respectively. Both are idempotent -- repeated calls will +// simply re-use the cached contents. Note that NeedTypesInfo will *only* type +// check the current package -- if you want to type-check imports as well, +// you'll need to type-check them first. +// +// Reference Pruning and Recursive Checking +// +// In order to type-check using only the packages you care about, you can use a +// TypeChecker. TypeChecker will visit each top-level type declaration, +// collect (optionally filtered) references, and type-check references +// packages. +// +// Errors +// +// Errors can be added to each package. Use ErrFromNode to create an error +// from an AST node. Errors can then be printed (complete with file and +// position information) using PrintErrors, optionally filtered by error type. +// It's generally a good idea to filter out TypeErrors when doing incomplete +// type-checking with TypeChecker. You can use MaybeErrList to return multiple +// errors if you need to return an error instead of adding it to a package. +// AddError will later unroll it into individual errors. +package loader diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go new file mode 100644 index 0000000000..aae8ef39f7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "fmt" + "go/ast" + "go/token" +) + +// PositionedError represents some error with an associated position. 
+// The position is tied to some external token.FileSet. +type PositionedError struct { + Pos token.Pos + error +} + +// ErrFromNode returns the given error, with additional information +// attaching it to the given AST node. It will automatically map +// over error lists. +func ErrFromNode(err error, node ast.Node) error { + if asList, isList := err.(ErrList); isList { + resList := make(ErrList, len(asList)) + for i, baseErr := range asList { + resList[i] = ErrFromNode(baseErr, node) + } + return resList + } + return PositionedError{ + Pos: node.Pos(), + error: err, + } +} + +// MaybeErrList constructs an ErrList if the given list of +// errors has any errors, otherwise returning nil. +func MaybeErrList(errs []error) error { + if len(errs) == 0 { + return nil + } + return ErrList(errs) +} + +// ErrList is a list of errors aggregated together into a single error. +type ErrList []error + +func (l ErrList) Error() string { + return fmt.Sprintf("%v", []error(l)) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go new file mode 100644 index 0000000000..7b3dff30f8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go @@ -0,0 +1,361 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "os" + "sync" + + "golang.org/x/tools/go/packages" +) + +// Much of this is strongly inspired by the contents of go/packages, +// except that it allows for lazy loading of syntax and type-checking +// information to speed up cases where full traversal isn't needed. + +// PrintErrors print errors associated with all packages +// in the given package graph, starting at the given root +// packages and traversing through all imports. It will skip +// any errors of the kinds specified in filterKinds. It will +// return true if any errors were printed. +func PrintErrors(pkgs []*Package, filterKinds ...packages.ErrorKind) bool { + pkgsRaw := make([]*packages.Package, len(pkgs)) + for i, pkg := range pkgs { + pkgsRaw[i] = pkg.Package + } + toSkip := make(map[packages.ErrorKind]struct{}) + for _, errKind := range filterKinds { + toSkip[errKind] = struct{}{} + } + hadErrors := false + packages.Visit(pkgsRaw, nil, func(pkgRaw *packages.Package) { + for _, err := range pkgRaw.Errors { + if _, skip := toSkip[err.Kind]; skip { + continue + } + hadErrors = true + fmt.Fprintln(os.Stderr, err) + } + }) + return hadErrors +} + +// Package is a single, unique Go package that can be +// lazily parsed and type-checked. Packages should not +// be constructed directly -- instead, use LoadRoots. +// For a given call to LoadRoots, only a single instance +// of each package exists, and thus they may be used as keys +// and for comparison. 
+type Package struct { + *packages.Package + + imports map[string]*Package + + loader *loader + sync.Mutex +} + +// Imports returns the imports for the given package, indexed by +// package path (*not* name in any particular file). +func (p *Package) Imports() map[string]*Package { + if p.imports == nil { + p.imports = p.loader.packagesFor(p.Package.Imports) + } + + return p.imports +} + +// NeedTypesInfo indicates that type-checking information is needed for this package. +// Actual type-checking information can be accessed via the Types and TypesInfo fields. +func (p *Package) NeedTypesInfo() { + if p.TypesInfo != nil { + return + } + p.NeedSyntax() + p.loader.typeCheck(p) +} + +// NeedSyntax indicates that a parsed AST is needed for this package. +// Actual ASTs can be accessed via the Syntax field. +func (p *Package) NeedSyntax() { + if p.Syntax != nil { + return + } + out := make([]*ast.File, len(p.CompiledGoFiles)) + var wg sync.WaitGroup + wg.Add(len(p.CompiledGoFiles)) + for i, filename := range p.CompiledGoFiles { + go func(i int, filename string) { + defer wg.Done() + src, err := ioutil.ReadFile(filename) + if err != nil { + p.AddError(err) + return + } + out[i], err = p.loader.parseFile(filename, src) + if err != nil { + p.AddError(err) + return + } + }(i, filename) + } + wg.Wait() + for _, file := range out { + if file == nil { + return + } + } + p.Syntax = out +} + +// AddError adds an error to the errors associated with the given package. +func (p *Package) AddError(err error) { + switch typedErr := err.(type) { + case *os.PathError: + // file-reading errors + p.Errors = append(p.Errors, packages.Error{ + Pos: typedErr.Path + ":1", + Msg: typedErr.Err.Error(), + Kind: packages.ParseError, + }) + case scanner.ErrorList: + // parsing/scanning errors + for _, subErr := range typedErr { + p.Errors = append(p.Errors, packages.Error{ + Pos: subErr.Pos.String(), + Msg: subErr.Msg, + Kind: packages.ParseError, + }) + } + case types.Error: + // type-checking errors + p.Errors = append(p.Errors, packages.Error{ + Pos: typedErr.Fset.Position(typedErr.Pos).String(), + Msg: typedErr.Msg, + Kind: packages.TypeError, + }) + case ErrList: + for _, subErr := range typedErr { + p.AddError(subErr) + } + case PositionedError: + p.Errors = append(p.Errors, packages.Error{ + Pos: p.loader.cfg.Fset.Position(typedErr.Pos).String(), + Msg: typedErr.Error(), + Kind: packages.UnknownError, + }) + default: + // should only happen for external errors, like ref checking + p.Errors = append(p.Errors, packages.Error{ + Pos: p.ID + ":-", + Msg: err.Error(), + Kind: packages.UnknownError, + }) + } +} + +// loader loads packages and their imports. Loaded packages will have +// type size, imports, and exports file information populated. Additional +// information, like ASTs and type-checking information, can be accessed +// via methods on individual packages. +type loader struct { + // Roots are the loaded "root" packages in the package graph loaded via + // LoadRoots. + Roots []*Package + + // cfg contains the package loading config (initialized on demand) + cfg *packages.Config + // packages contains the cache of Packages indexed by the underlying + // package.Package, so that we don't ever produce two Packages with + // the same underlying packages.Package. + packages map[*packages.Package]*Package + packagesMu sync.Mutex +} + +// packageFor returns a wrapped Package for the given packages.Package, +// ensuring that there's a one-to-one mapping between the two. 
+// It's *not* threadsafe -- use packagesFor for that. +func (l *loader) packageFor(pkgRaw *packages.Package) *Package { + if l.packages[pkgRaw] == nil { + l.packages[pkgRaw] = &Package{ + Package: pkgRaw, + loader: l, + } + } + return l.packages[pkgRaw] +} + +// packagesFor returns a map of Package objects for each packages.Package in the input +// map, ensuring that there's a one-to-one mapping between package.Package and Package +// (as per packageFor). +func (l *loader) packagesFor(pkgsRaw map[string]*packages.Package) map[string]*Package { + l.packagesMu.Lock() + defer l.packagesMu.Unlock() + + out := make(map[string]*Package, len(pkgsRaw)) + for name, rawPkg := range pkgsRaw { + out[name] = l.packageFor(rawPkg) + } + return out +} + +// typeCheck type-checks the given package. +func (l *loader) typeCheck(pkg *Package) { + // don't conflict with typeCheckFromExportData + + pkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + + pkg.Fset = l.cfg.Fset + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + importedPkg := pkg.Imports()[path] + + // it's possible to have a call to check in parallel to a call to this + // if one package in the package graph gets its dependency filtered out, + // but another doesn't (so one wants a "dummy" package here, and another + // wants the full check). + // + // Thus, we need to lock here (at least for the time being) to avoid + // races between the above write to `pkg.Types` and this checking of + // importedPkg.Types. + importedPkg.Lock() + defer importedPkg.Unlock() + + if importedPkg == nil { + return nil, fmt.Errorf("no package information for %q", path) + } + + if importedPkg.Types != nil && importedPkg.Types.Complete() { + return importedPkg.Types, nil + } + + // if we haven't already loaded typecheck data, we don't care about this package's types + return types.NewPackage(importedPkg.PkgPath, importedPkg.Name), nil + }) + + var errs []error + + // type-check + checkConfig := &types.Config{ + Importer: importer, + + IgnoreFuncBodies: true, // we only need decl-level info + + Error: func(err error) { + errs = append(errs, err) + }, + + Sizes: pkg.TypesSizes, + } + if err := types.NewChecker(checkConfig, l.cfg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax); err != nil { + errs = append(errs, err) + } + + // make sure that if a given sub-import is ill-typed, we mark this package as ill-typed as well. + illTyped := len(errs) > 0 + if !illTyped { + for _, importedPkg := range pkg.Imports() { + if importedPkg.IllTyped { + illTyped = true + break + } + } + } + pkg.IllTyped = illTyped + + // publish errors to the package error list. + for _, err := range errs { + pkg.AddError(err) + } +} + +// parseFile parses the given file, including comments. +func (l *loader) parseFile(filename string, src []byte) (*ast.File, error) { + // skip function bodies + file, err := parser.ParseFile(l.cfg.Fset, filename, src, parser.AllErrors|parser.ParseComments) + if err != nil { + return nil, err + } + + return file, nil +} + +// LoadRoots loads the given "root" packages by path, transitively loading +// and all imports as well. 
+// +// Loaded packages will have type size, imports, and exports file information +// populated. Additional information, like ASTs and type-checking information, +// can be accessed via methods on individual packages. +func LoadRoots(roots ...string) ([]*Package, error) { + return LoadRootsWithConfig(&packages.Config{}, roots...) +} + +// LoadRootsWithConfig functions like LoadRoots, except that it allows passing +// a custom loading config. The config will be modified to suit the needs of +// the loader. +// +// This is generally only useful for use in testing when you need to modify +// loading settings to load from a fake location. +func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, error) { + l := &loader{ + cfg: cfg, + packages: make(map[*packages.Package]*Package), + } + l.cfg.Mode |= packages.LoadImports | packages.NeedTypesSizes + if l.cfg.Fset == nil { + l.cfg.Fset = token.NewFileSet() + } + // put our build flags first so that callers can override them + l.cfg.BuildFlags = append([]string{"-tags", "ignore_autogenerated"}, l.cfg.BuildFlags...) + + rawPkgs, err := packages.Load(l.cfg, roots...) + if err != nil { + return nil, err + } + + for _, rawPkg := range rawPkgs { + l.Roots = append(l.Roots, l.packageFor(rawPkg)) + } + + return l.Roots, nil +} + +// importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go new file mode 100644 index 0000000000..3b783e1685 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "strings" +) + +// NonVendorPath returns a package path that does not include anything before the +// last vendor directory. This is useful for when using vendor directories, +// and using go/types.Package.Path(), which returns the full path including vendor. +// +// If you're using this, make sure you really need it -- it's better to index by +// the actual Package object when you can. +func NonVendorPath(rawPath string) string { + parts := strings.Split(rawPath, "/vendor/") + return parts[len(parts)-1] +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go new file mode 100644 index 0000000000..9e2989cf54 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go @@ -0,0 +1,254 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
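// Editor's sketch (not part of the vendored file): one plausible way a
// generator could drive the loader defined above -- load root packages,
// lazily type-check each one, and print the remaining errors. The
// "./api/..." pattern and the choice to silence type errors are assumptions
// made for illustration only.
package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
	"sigs.k8s.io/controller-tools/pkg/loader"
)

func main() {
	roots, err := loader.LoadRoots("./api/...")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, pkg := range roots {
		// Syntax and type information are only materialized on demand.
		pkg.NeedTypesInfo()
		fmt.Println(pkg.PkgPath, "types loaded:", pkg.Types != nil)
	}
	// Type errors are expected here because imports are deliberately left
	// unchecked; everything else is printed with position information.
	if loader.PrintErrors(roots, packages.TypeError) {
		os.Exit(1)
	}
}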
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "fmt" + + "go/ast" + "strconv" + "sync" +) + +// NB(directxman12): most of this is done by the typechecker, +// but it's a bit slow/heavyweight for what we want -- we want +// to resolve external imports *only* if we actually need them. + +// Basically, what we do is: +// 1. Map imports to names +// 2. Find all explicit external references (`name.type`) +// 3. Find all referenced packages by merging explicit references and dot imports +// 4. Only type-check those packages +// 5. Ignore type-checking errors from the missing packages, because we won't ever +// touch unloaded types (they're probably used in ignored fields/types, variables, or functions) +// (done using PrintErrors with an ignore argument from the caller). +// 6. Notice any actual type-checking errors via invalid types + +// importsMap saves import aliases, mapping them to underlying packages. +type importsMap struct { + // dotImports maps package IDs to packages for any packages that have/ been imported as `.` + dotImports map[string]*Package + // byName maps package aliases or names to the underlying package. + byName map[string]*Package +} + +// mapImports maps imports from the names they use in the given file to the underlying package, +// using a map of package import paths to packages (generally from Package.Imports()). +func mapImports(file *ast.File, importedPkgs map[string]*Package) (*importsMap, error) { + m := &importsMap{ + dotImports: make(map[string]*Package), + byName: make(map[string]*Package), + } + for _, importSpec := range file.Imports { + path, err := strconv.Unquote(importSpec.Path.Value) + if err != nil { + return nil, ErrFromNode(err, importSpec.Path) + } + importedPkg := importedPkgs[path] + if importedPkg == nil { + return nil, ErrFromNode(fmt.Errorf("no such package located"), importSpec.Path) + } + if importSpec.Name == nil { + m.byName[importedPkg.Name] = importedPkg + continue + } + if importSpec.Name.Name == "." { + m.dotImports[importedPkg.ID] = importedPkg + continue + } + m.byName[importSpec.Name.Name] = importedPkg + } + + return m, nil +} + +// referenceSet finds references to external packages' types in the given file, +// without otherwise calling into the type-checker. When checking structs, +// it only checks fields with JSON tags. +type referenceSet struct { + file *ast.File + imports *importsMap + pkg *Package + + externalRefs map[*Package]struct{} +} + +func (r *referenceSet) init() { + if r.externalRefs == nil { + r.externalRefs = make(map[*Package]struct{}) + } +} + +// NodeFilter filters nodes, accepting them for reference collection +// when true is returned and rejecting them when false is returned. +type NodeFilter func(ast.Node) bool + +// collectReferences saves all references to external types in the given info. +func (r *referenceSet) collectReferences(rawType ast.Expr, filterNode NodeFilter) { + r.init() + col := &referenceCollector{ + refs: r, + filterNode: filterNode, + } + ast.Walk(col, rawType) +} + +// external saves an external reference to the given named package. 
+func (r *referenceSet) external(pkgName string) { + pkg := r.imports.byName[pkgName] + if pkg == nil { + r.pkg.AddError(fmt.Errorf("use of unimported package %q", pkgName)) + return + } + r.externalRefs[pkg] = struct{}{} +} + +// referenceCollector visits nodes in an AST, adding external references to a +// referenceSet. +type referenceCollector struct { + refs *referenceSet + filterNode NodeFilter +} + +func (c *referenceCollector) Visit(node ast.Node) ast.Visitor { + if !c.filterNode(node) { + return nil + } + switch typedNode := node.(type) { + case *ast.Ident: + // local reference or dot-import, ignore + return nil + case *ast.SelectorExpr: + pkgName := typedNode.X.(*ast.Ident).Name + c.refs.external(pkgName) + return nil + default: + return c + } +} + +// allReferencedPackages finds all directly referenced packages in the given package. +func allReferencedPackages(pkg *Package, filterNodes NodeFilter) []*Package { + pkg.NeedSyntax() + refsByFile := make(map[*ast.File]*referenceSet) + for _, file := range pkg.Syntax { + imports, err := mapImports(file, pkg.Imports()) + if err != nil { + pkg.AddError(err) + return nil + } + refs := &referenceSet{ + file: file, + imports: imports, + pkg: pkg, + } + refsByFile[file] = refs + } + + EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) { + refs := refsByFile[file] + refs.collectReferences(spec.Type, filterNodes) + }) + + allPackages := make(map[*Package]struct{}) + for _, refs := range refsByFile { + for _, pkg := range refs.imports.dotImports { + allPackages[pkg] = struct{}{} + } + for ref := range refs.externalRefs { + allPackages[ref] = struct{}{} + } + } + + res := make([]*Package, 0, len(allPackages)) + for pkg := range allPackages { + res = append(res, pkg) + } + return res +} + +// TypeChecker performs type-checking on a limitted subset of packages by +// checking each package's types' externally-referenced types, and only +// type-checking those packages. +type TypeChecker struct { + checkedPackages map[*Package]struct{} + filterNodes NodeFilter + sync.Mutex +} + +// Check type-checks the given package and all packages referenced +// by types that pass through (have true returned by) filterNodes. +func (c *TypeChecker) Check(root *Package, filterNodes NodeFilter) { + c.init() + + if filterNodes == nil { + filterNodes = c.filterNodes + } + + // use a sub-checker with the appropriate settings + (&TypeChecker{ + filterNodes: filterNodes, + checkedPackages: c.checkedPackages, + }).check(root) +} + +func (c *TypeChecker) init() { + if c.checkedPackages == nil { + c.checkedPackages = make(map[*Package]struct{}) + } + if c.filterNodes == nil { + // check every type by default + c.filterNodes = func(_ ast.Node) bool { + return true + } + } +} + +// check recursively type-checks the given package, only loading packages that +// are actually referenced by our types (it's the actual implementation of Check, +// without initialization). +func (c *TypeChecker) check(root *Package) { + root.Lock() + defer root.Unlock() + + c.Lock() + _, ok := c.checkedPackages[root] + c.Unlock() + if ok { + return + } + + refedPackages := allReferencedPackages(root, c.filterNodes) + + // first, resolve imports for all leaf packages... 
+ var wg sync.WaitGroup + for _, pkg := range refedPackages { + wg.Add(1) + go func(pkg *Package) { + defer wg.Done() + c.check(pkg) + }(pkg) + } + wg.Wait() + + // ...then, we can safely type-check ourself + root.NeedTypesInfo() + + c.Lock() + defer c.Unlock() + c.checkedPackages[root] = struct{}{} +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go new file mode 100644 index 0000000000..b5646fde1d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go @@ -0,0 +1,81 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loader + +import ( + "go/ast" + "reflect" + "strconv" +) + +// TypeCallback is a callback called for each raw AST (gendecl, typespec) combo. +type TypeCallback func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) + +// EachType calls the given callback for each (gendecl, typespec) combo in the +// given package. Generally, using markers.EachType is better when working +// with marker data, and has a more convinient representation. +func EachType(pkg *Package, cb TypeCallback) { + visitor := &typeVisitor{ + callback: cb, + } + pkg.NeedSyntax() + for _, file := range pkg.Syntax { + visitor.file = file + ast.Walk(visitor, file) + } +} + +// typeVisitor visits all TypeSpecs, calling the given callback for each. +type typeVisitor struct { + callback TypeCallback + decl *ast.GenDecl + file *ast.File +} + +// Visit visits all TypeSpecs. +func (v *typeVisitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + v.decl = nil + return v + } + + switch typedNode := node.(type) { + case *ast.File: + v.file = typedNode + return v + case *ast.GenDecl: + v.decl = typedNode + return v + case *ast.TypeSpec: + v.callback(v.file, v.decl, typedNode) + return nil // don't recurse + default: + return nil + } +} + +// ParseAstTag parses the given raw tag literal into a reflect.StructTag. +func ParseAstTag(tag *ast.BasicLit) reflect.StructTag { + if tag == nil { + return reflect.StructTag("") + } + tagStr, err := strconv.Unquote(tag.Value) + if err != nil { + return reflect.StructTag("") + } + return reflect.StructTag(tagStr) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go new file mode 100644 index 0000000000..c5ea2345fd --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go @@ -0,0 +1,422 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
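// Editor's sketch (not vendored) combining the two helpers above: TypeChecker
// limits type-checking to packages referenced through json-tagged struct
// fields, and EachType then visits every type declaration in the root. The
// json-tag filter is an assumption about how a CRD-style generator might
// prune the graph; this patch does not require it.
package example

import (
	"fmt"
	"go/ast"
	"strings"

	"sigs.k8s.io/controller-tools/pkg/loader"
)

func checkAndVisit(root *loader.Package) {
	checker := &loader.TypeChecker{}
	checker.Check(root, func(node ast.Node) bool {
		// Only follow references reachable through json-tagged struct fields;
		// other references never cause their packages to be type-checked.
		field, isField := node.(*ast.Field)
		if !isField {
			return true
		}
		return field.Tag != nil && strings.Contains(field.Tag.Value, "json:")
	})

	loader.EachType(root, func(_ *ast.File, _ *ast.GenDecl, spec *ast.TypeSpec) {
		fmt.Printf("%s: type %s\n", root.PkgPath, spec.Name.Name)
	})
}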
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "go/ast" + "go/token" + "strings" + "sync" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// Collector collects and parses marker comments defined in the registry +// from package source code. If no registry is provided, an empty one will +// be initialized on the first call to MarkersInPackage. +type Collector struct { + *Registry + + byPackage map[string]map[ast.Node]MarkerValues + mu sync.Mutex +} + +// MarkerValues are all the values for some set of markers. +type MarkerValues map[string][]interface{} + +// Get fetches the first value that for the given marker, returning +// nil if no values are available. +func (v MarkerValues) Get(name string) interface{} { + vals := v[name] + if len(vals) == 0 { + return nil + } + return vals[0] +} + +func (c *Collector) init() { + if c.Registry == nil { + c.Registry = &Registry{} + } + if c.byPackage == nil { + c.byPackage = make(map[string]map[ast.Node]MarkerValues) + } +} + +// MarkersInPackage computes the marker values by node for the given package. Results +// are cached by package ID, so this is safe to call repeatedly from different functions. +// Each file in the package is treated as a distinct node. +// +// We consider a marker to be associated with a given AST node if either of the following are true: +// +// - it's in the Godoc for that AST node +// +// - it's in the closest non-godoc comment group above that node, +// *and* that node is a type or field node, *and* [it's either +// registered as type-level *or* it's not registered as being +// package-level] +// +// - it's not in the Godoc of a node, doesn't meet the above criteria, and +// isn't in a struct definition (in which case it's package-level) +func (c *Collector) MarkersInPackage(pkg *loader.Package) (map[ast.Node]MarkerValues, error) { + c.mu.Lock() + c.init() + if markers, exist := c.byPackage[pkg.ID]; exist { + c.mu.Unlock() + return markers, nil + } + // unlock early, it's ok if we do a bit extra work rather than locking while we're working + c.mu.Unlock() + + pkg.NeedSyntax() + nodeMarkersRaw := c.associatePkgMarkers(pkg) + markers, err := c.parseMarkersInPackage(nodeMarkersRaw) + if err != nil { + return nil, err + } + + c.mu.Lock() + defer c.mu.Unlock() + c.byPackage[pkg.ID] = markers + + return markers, nil +} + +// parseMarkersInPackage parses the given raw marker comments into output values using the registry. +func (c *Collector) parseMarkersInPackage(nodeMarkersRaw map[ast.Node][]markerComment) (map[ast.Node]MarkerValues, error) { + var errors []error + nodeMarkerValues := make(map[ast.Node]MarkerValues) + for node, markersRaw := range nodeMarkersRaw { + var target TargetType + switch node.(type) { + case *ast.File: + target = DescribesPackage + case *ast.Field: + target = DescribesField + default: + target = DescribesType + } + markerVals := make(map[string][]interface{}) + for _, markerRaw := range markersRaw { + markerText := markerRaw.Text() + def := c.Registry.Lookup(markerText, target) + if def == nil { + continue + } + val, err := def.Parse(markerText) + if err != nil { + errors = append(errors, loader.ErrFromNode(err, markerRaw)) + continue + } + markerVals[def.Name] = append(markerVals[def.Name], val) + } + nodeMarkerValues[node] = markerVals + } + + return nodeMarkerValues, loader.MaybeErrList(errors) +} + +// associatePkgMarkers associates markers with AST nodes in the given package. 
+func (c *Collector) associatePkgMarkers(pkg *loader.Package) map[ast.Node][]markerComment { + nodeMarkers := make(map[ast.Node][]markerComment) + for _, file := range pkg.Syntax { + fileNodeMarkers := c.associateFileMarkers(file) + for node, markers := range fileNodeMarkers { + nodeMarkers[node] = append(nodeMarkers[node], markers...) + } + } + + return nodeMarkers +} + +// associateFileMarkers associates markers with AST nodes in the given file. +func (c *Collector) associateFileMarkers(file *ast.File) map[ast.Node][]markerComment { + // grab all the raw marker comments by node + visitor := markerSubVisitor{ + collectPackageLevel: true, + markerVisitor: &markerVisitor{ + nodeMarkers: make(map[ast.Node][]markerComment), + allComments: file.Comments, + }, + } + ast.Walk(visitor, file) + + // grab the last package-level comments at the end of the file (if any) + lastFileMarkers := visitor.markersBetween(false, visitor.commentInd, len(visitor.allComments)) + visitor.pkgMarkers = append(visitor.pkgMarkers, lastFileMarkers...) + + // figure out if any type-level markers are actually package-level markers + for node, markers := range visitor.nodeMarkers { + _, isType := node.(*ast.TypeSpec) + if !isType { + continue + } + endOfMarkers := 0 + for _, marker := range markers { + if marker.fromGodoc { + // markers from godoc are never package level + markers[endOfMarkers] = marker + endOfMarkers++ + continue + } + markerText := marker.Text() + typeDef := c.Registry.Lookup(markerText, DescribesType) + if typeDef != nil { + // prefer assuming type-level markers + markers[endOfMarkers] = marker + endOfMarkers++ + continue + } + def := c.Registry.Lookup(markerText, DescribesPackage) + if def == nil { + // assume type-level unless proven otherwise + markers[endOfMarkers] = marker + endOfMarkers++ + continue + } + // it's package-level, since a package-level definition exists + visitor.pkgMarkers = append(visitor.pkgMarkers, marker) + } + visitor.nodeMarkers[node] = markers[:endOfMarkers] // re-set after trimming the package markers + } + visitor.nodeMarkers[file] = visitor.pkgMarkers + + return visitor.nodeMarkers +} + +// markerComment is an AST comment that contains a marker. +// It may or may not be from a Godoc comment, which affects +// marker re-associated (from type-level to package-level) +type markerComment struct { + *ast.Comment + fromGodoc bool +} + +// Text returns the text of the marker, stripped of the comment +// marker and leading spaces, as should be passed to Registry.Lookup +// and Registry.Parse. +func (c markerComment) Text() string { + return strings.TrimSpace(c.Comment.Text[2:]) +} + +// markerVisistor visits AST nodes, recording markers associated with each node. +type markerVisitor struct { + allComments []*ast.CommentGroup + commentInd int + + declComments []markerComment + lastLineCommentGroup *ast.CommentGroup + + pkgMarkers []markerComment + nodeMarkers map[ast.Node][]markerComment +} + +// isMarkerComment checks that the given comment is a single-line (`//`) +// comment and it's first non-space content is `+`. +func isMarkerComment(comment string) bool { + if comment[0:2] != "//" { + return false + } + stripped := strings.TrimSpace(comment[2:]) + if len(stripped) < 1 || stripped[0] != '+' { + return false + } + return true +} + +// markersBetween grabs the markers between the given indicies in the list of all comments. 
+func (v *markerVisitor) markersBetween(fromGodoc bool, start, end int) []markerComment { + if start < 0 || end < 0 { + return nil + } + var res []markerComment + for i := start; i < end; i++ { + commentGroup := v.allComments[i] + for _, comment := range commentGroup.List { + if !isMarkerComment(comment.Text) { + continue + } + res = append(res, markerComment{Comment: comment, fromGodoc: fromGodoc}) + } + } + return res +} + +type markerSubVisitor struct { + *markerVisitor + node ast.Node + collectPackageLevel bool +} + +// Visit collects markers for each node in the AST, optionally +// collecting unassociated markers as package-level. +func (v markerSubVisitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + // end of the node, so we might need to advance comments beyond the end + // of the block if we don't want to collect package-level markers in + // this block. + + if !v.collectPackageLevel { + if v.commentInd < len(v.allComments) { + lastCommentInd := v.commentInd + nextGroup := v.allComments[lastCommentInd] + for nextGroup.Pos() < v.node.End() { + lastCommentInd++ + if lastCommentInd >= len(v.allComments) { + // after the increment so our decrement below still makes sense + break + } + nextGroup = v.allComments[lastCommentInd] + } + v.commentInd = lastCommentInd + } + } + + return nil + } + + // skip comments on the same line as the previous node + // making sure to double-check for the case where we've gone past the end of the comments + // but still have to finish up typespec-gendecl association (see below). + if v.lastLineCommentGroup != nil && v.commentInd < len(v.allComments) && v.lastLineCommentGroup.Pos() == v.allComments[v.commentInd].Pos() { + v.commentInd++ + } + + // stop visiting if there are no more comments in the file + // NB(directxman12): we can't just stop immediately, because we + // still need to check if there are typespecs associated with gendecls. + var markerCommentBlock []markerComment + var docCommentBlock []markerComment + lastCommentInd := v.commentInd + if v.commentInd < len(v.allComments) { + // figure out the first comment after the node in question... + nextGroup := v.allComments[lastCommentInd] + for nextGroup.Pos() < node.Pos() { + lastCommentInd++ + if lastCommentInd >= len(v.allComments) { + // after the increment so our decrement below still makes sense + break + } + nextGroup = v.allComments[lastCommentInd] + } + lastCommentInd-- // ...then decrement to get the last comment before the node in question + + // figure out the godoc comment so we can deal with it separately + var docGroup *ast.CommentGroup + docGroup, v.lastLineCommentGroup = associatedCommentsFor(node) + + // find the last comment group that's not godoc + markerCommentInd := lastCommentInd + if docGroup != nil && v.allComments[markerCommentInd].Pos() == docGroup.Pos() { + markerCommentInd-- + } + + // check if we have freestanding package markers, + // and find the markers in our "closest non-godoc" comment block, + // plus our godoc comment block + if markerCommentInd >= v.commentInd { + if v.collectPackageLevel { + // assume anything between the comment ind and the marker ind (not including it) + // are package-level + v.pkgMarkers = append(v.pkgMarkers, v.markersBetween(false, v.commentInd, markerCommentInd)...) 
+ } + markerCommentBlock = v.markersBetween(false, markerCommentInd, markerCommentInd+1) + docCommentBlock = v.markersBetween(true, markerCommentInd+1, lastCommentInd+1) + } else { + docCommentBlock = v.markersBetween(true, markerCommentInd+1, lastCommentInd+1) + } + } + + resVisitor := markerSubVisitor{ + collectPackageLevel: false, // don't collect package level by default + markerVisitor: v.markerVisitor, + node: node, + } + + // associate those markers with a node + switch typedNode := node.(type) { + case *ast.GenDecl: + // save the comments associated with the gen-decl if it's a single-line type decl + if typedNode.Lparen != token.NoPos || typedNode.Tok != token.TYPE { + // not a single-line type spec, treat them as free comments + v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...) + break + } + // save these, we'll need them when we encounter the actual type spec + v.declComments = append(v.declComments, markerCommentBlock...) + v.declComments = append(v.declComments, docCommentBlock...) + case *ast.TypeSpec: + // add in comments attributed to the gen-decl, if any, + // as well as comments associated with the actual type + v.nodeMarkers[node] = append(v.nodeMarkers[node], v.declComments...) + v.nodeMarkers[node] = append(v.nodeMarkers[node], markerCommentBlock...) + v.nodeMarkers[node] = append(v.nodeMarkers[node], docCommentBlock...) + + v.declComments = nil + v.collectPackageLevel = false // don't collect package-level inside type structs + case *ast.Field: + v.nodeMarkers[node] = append(v.nodeMarkers[node], markerCommentBlock...) + v.nodeMarkers[node] = append(v.nodeMarkers[node], docCommentBlock...) + case *ast.File: + v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...) + v.pkgMarkers = append(v.pkgMarkers, docCommentBlock...) + + // collect markers in root file scope + resVisitor.collectPackageLevel = true + default: + // assume markers before anything else are package-level markers, + // *but* don't include any markers in godoc + if v.collectPackageLevel { + v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...) + } + } + + // increment the comment ind so that we start at the right place for the next node + v.commentInd = lastCommentInd + 1 + + return resVisitor + +} + +// associatedCommentsFor returns the doc comment group (if relevant and present) and end-of-line comment +// (again if relevant and present) for the given AST node. +func associatedCommentsFor(node ast.Node) (docGroup *ast.CommentGroup, lastLineCommentGroup *ast.CommentGroup) { + switch typedNode := node.(type) { + case *ast.Field: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + case *ast.File: + docGroup = typedNode.Doc + case *ast.FuncDecl: + docGroup = typedNode.Doc + case *ast.GenDecl: + docGroup = typedNode.Doc + case *ast.ImportSpec: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + case *ast.TypeSpec: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + case *ast.ValueSpec: + docGroup = typedNode.Doc + lastLineCommentGroup = typedNode.Comment + default: + lastLineCommentGroup = nil + } + + return docGroup, lastLineCommentGroup +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go new file mode 100644 index 0000000000..707036de8f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go @@ -0,0 +1,113 @@ +/* +Copyright 2019 The Kubernetes Authors. 
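// Editor's sketch (not vendored) of driving the Collector defined above. The
// marker name "example:enabled" and its boolean payload are invented for the
// example, and the Registry helpers used here (MakeDefinition, Register) are
// assumed to match the rest of this vendored markers package.
package example

import (
	"fmt"
	"go/ast"

	"sigs.k8s.io/controller-tools/pkg/loader"
	"sigs.k8s.io/controller-tools/pkg/markers"
)

func collectEnabled(pkg *loader.Package) error {
	reg := &markers.Registry{}
	def, err := markers.MakeDefinition("example:enabled", markers.DescribesType, false)
	if err != nil {
		return err
	}
	if err := reg.Register(def); err != nil {
		return err
	}

	col := &markers.Collector{Registry: reg}
	byNode, err := col.MarkersInPackage(pkg)
	if err != nil {
		return err
	}
	for node, vals := range byNode {
		if spec, isType := node.(*ast.TypeSpec); isType {
			// Get returns the first parsed value for the marker, or nil if unset.
			fmt.Printf("type %s: example:enabled=%v\n", spec.Name.Name, vals.Get("example:enabled"))
		}
	}
	return nil
}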
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package markers contains utilities for defining and parsing "marker
+// comments", also occasionally called tag comments (we use the term marker to
+// avoid confusion with struct tags). Parsed result (output) values take the
+// form of Go values, much as in the "encoding/json" package.
+//
+// Definitions and Parsing
+//
+// Markers are defined as structured Definitions which can be used to
+// consistently parse marker comments. A Definition contains a concrete
+// output type for the marker, which can be a simple type (like string), a
+// struct, or a wrapper type (useful for defining additional methods on marker
+// types).
+//
+// Markers take the general form
+//
+//	+path:to:marker=val
+//
+//	+path:to:marker:arg1=val,arg2=val2
+//
+//	+path:to:marker
+//
+// Arguments may be ints, bools, strings, and slices. Ints and bools take
+// their standard form from Go. Strings may take any of their standard forms,
+// or any sequence of unquoted characters up until a `,` or `;` is
+// encountered. Lists take either of the following forms:
+//
+//	val;val;val
+//
+//	{val, val, val}
+//
+// Note that the first form will not properly parse nested slices, but is
+// generally convenient and is the form used in many existing markers.
+//
+// Each of those argument types maps to the corresponding Go type. Pointers
+// mark optional fields (a struct tag, below, may also be used). The empty
+// interface will match any type.
+//
+// Struct fields may optionally be annotated with the `marker` struct tag. The
+// first argument is a name override. If it's left blank (or the tag isn't
+// present), the camelCase version of the name will be used. The only
+// additional argument defined is `optional`, which marks a field as optional
+// without using a pointer.
+//
+// All parsed values are unmarshalled into the output type. If any
+// non-optional fields aren't mentioned, an error will be raised unless
+// `Strict` is set to false.
+//
+// Registries and Lookup
+//
+// Definitions can be added to registries to facilitate lookups. Each
+// definition is marked as either describing a type, struct field, or package
+// (unassociated). The same marker name may be registered multiple times, as
+// long as each describes a different construct (type, field, or package).
+// Definitions can then be looked up by passing unparsed markers.
+//
+// Collection and Extraction
+//
+// Markers can be collected from a loader.Package using a Collector. The
+// Collector will read from a given Registry, collecting comments that look
+// like markers and parsing them if they match some definition on the
+// registry.
+//
+// Markers are considered associated with a particular field or type if they
+// exist in the Godoc, or the closest non-godoc comment. Any other markers not
+// inside some other block (e.g. a struct definition, interface definition,
+// etc.) are considered package level. Markers in a "closest non-Go comment
+// block" may also be considered package level if registered as such and no
+// identical type-level definition exists.
+//
+// Like loader.Package, Collector's methods are idempotent and will not
+// re-perform work.
+//
+// Traversal
+//
+// The EachType function iterates over each type in a Package, providing
+// conveniently structured type and field information with marker values
+// associated.
+//
+// PackageMarkers can be used to fetch just package-level markers.
+//
+// Help
+//
+// Help can be defined for each marker using the DefinitionHelp struct. It's
+// mostly intended to be generated off of godocs using cmd/helpgen, which
+// takes the first line as the summary (removing the type/field name), and
+// considers the rest as details. It looks for the
+//
+//	+controllertools:generateHelp[:category=<category>]
+//
+// marker to start generation.
+//
+// If you can't use godoc-based generation for whatever reason (e.g.
+// primitive-typed markers), you can use the SimpleHelp and DeprecatedHelp
+// helper functions to generate help structs.
+//
+// Help is then registered into a registry as associated with the actual
+// definition, and can then be later retrieved from the registry.
+package markers
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go
new file mode 100644
index 0000000000..26ca059bc8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+// You *probably* don't want to write these structs by hand
+// -- use cmd/helpgen if you can write Godoc, and {Simple,Deprecated}Help
+// otherwise.
+
+// DetailedHelp contains brief help, as well as more details.
+// For the "full" help, join the two together.
+type DetailedHelp struct {
+	Summary string
+	Details string
+}
+
+// DefinitionHelp contains overall help for a marker Definition,
+// as well as per-field help.
+type DefinitionHelp struct {
+	// DetailedHelp contains the overall help for the marker.
+	DetailedHelp
+	// Category describes what kind of marker this is.
+	Category string
+	// DeprecatedInFavorOf marks the marker as deprecated.
+	// If non-nil & empty, it's assumed to just mean deprecated permanently.
+	// If non-empty, it's assumed to be a marker name.
+	DeprecatedInFavorOf *string
+
+	// NB(directxman12): we make FieldHelp be in terms of the Go struct field
+	// names so that we don't have to know the conversion or processing rules
+	// for struct fields at compile-time for help generation.
+
+	// FieldHelp defines the per-field help for this marker, *in terms of the
+	// Go struct field names*. Use the FieldsHelp method to map this to
+	// marker argument names.
+	FieldHelp map[string]DetailedHelp
+}
+
+// FieldsHelp maps per-field help to the actual marker argument names from the
+// given definition.
+func (d *DefinitionHelp) FieldsHelp(def *Definition) map[string]DetailedHelp { + fieldsHelp := make(map[string]DetailedHelp, len(def.FieldNames)) + for fieldName, argName := range def.FieldNames { + fieldsHelp[fieldName] = d.FieldHelp[argName] + } + return fieldsHelp +} + +// SimpleHelp returns help that just has marker-level summary information +// (e.g. for use with empty or primitive-typed markers, where Godoc-based +// generation isn't possible). +func SimpleHelp(category, summary string) *DefinitionHelp { + return &DefinitionHelp{ + Category: category, + DetailedHelp: DetailedHelp{Summary: summary}, + } +} + +// DeprecatedHelp returns simple help (a la SimpleHelp), except marked as +// deprecated in favor of the given marker (or an empty string for just +// deprecated). +func DeprecatedHelp(inFavorOf, category, summary string) *DefinitionHelp { + return &DefinitionHelp{ + Category: category, + DetailedHelp: DetailedHelp{Summary: summary}, + DeprecatedInFavorOf: &inFavorOf, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go new file mode 100644 index 0000000000..9f90994e40 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go @@ -0,0 +1,727 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + sc "text/scanner" + "unicode" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// expect checks that the next token of the scanner is the given token, adding an error +// to the scanner if not. It returns whether the token was as expected. +func expect(scanner *sc.Scanner, expected rune, errDesc string) bool { + tok := scanner.Scan() + if tok != expected { + scanner.Error(scanner, fmt.Sprintf("expected %s, got %q", errDesc, scanner.TokenText())) + return false + } + return true +} + +var ( + // interfaceType is a pre-computed reflect.Type representing the empty interface. + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() + rawArgsType = reflect.TypeOf((*RawArguments)(nil)).Elem() +) + +// lowerCamelCase converts PascalCase string to +// a camelCase string (by lowering the first rune). +func lowerCamelCase(in string) string { + isFirst := true + return strings.Map(func(inRune rune) rune { + if isFirst { + isFirst = false + return unicode.ToLower(inRune) + } + return inRune + }, in) +} + +// RawArguments is a special type that can be used for a marker +// to receive *all* raw, underparsed argument data for a marker. +// You probably want to use `interface{}` to match any type instead. +// Use *only* for legacy markers that don't follow Definition's normal +// parsing logic. It should *not* be used as a field in a marker struct. +type RawArguments []byte + +// ArgumentType is the kind of a marker argument type. +// It's roughly analogous to a subset of reflect.Kind, with +// an extra "AnyType" to represent the empty interface. 
+type ArgumentType int
+
+const (
+	// InvalidType represents a type that can't be parsed, and should never be used.
+	InvalidType ArgumentType = iota
+	// IntType is an int
+	IntType
+	// StringType is a string
+	StringType
+	// BoolType is a bool
+	BoolType
+	// AnyType is the empty interface, and matches the rest of the content
+	AnyType
+	// SliceType is any slice constructed of the ArgumentTypes
+	SliceType
+	// RawType represents content that gets passed directly to the marker
+	// without any parsing. It should *only* be used with anonymous markers.
+	RawType
+)
+
+// Argument is the type of a marker argument.
+type Argument struct {
+	Type     ArgumentType
+	Optional bool
+	Pointer  bool
+
+	ItemType *Argument
+}
+
+// typeString contains the internals of TypeString.
+func (a Argument) typeString(out *strings.Builder) {
+	if a.Pointer {
+		out.WriteRune('*')
+	}
+
+	switch a.Type {
+	case InvalidType:
+		out.WriteString("<invalid>")
+	case IntType:
+		out.WriteString("int")
+	case StringType:
+		out.WriteString("string")
+	case BoolType:
+		out.WriteString("bool")
+	case AnyType:
+		out.WriteString("<any>")
+	case SliceType:
+		out.WriteString("[]")
+		// arguments can't be non-pointer optional, so just call into typeString again.
+		a.ItemType.typeString(out)
+	case RawType:
+		out.WriteString("<raw>")
+	}
+}
+
+// TypeString returns a string roughly equivalent
+// (but not identical) to the underlying Go type that
+// this argument would parse to. It's mainly useful
+// for user-friendly formatting of this argument (e.g.
+// help strings).
+func (a Argument) TypeString() string {
+	out := &strings.Builder{}
+	a.typeString(out)
+	return out.String()
+}
+
+func (a Argument) String() string {
+	if a.Optional {
+		return fmt.Sprintf("<optional %s>", a.TypeString())
+	}
+	return fmt.Sprintf("<%s>", a.TypeString())
+}
+
+// castAndSet casts val to out's type if needed,
+// then sets out to val.
+func castAndSet(out, val reflect.Value) {
+	outType := out.Type()
+	if outType != val.Type() {
+		val = val.Convert(outType)
+	}
+	out.Set(val)
+}
+
+// makeSliceType makes a reflect.Type for a slice of the given type.
+// Useful for constructing the out value for when AnyType's guess returns a slice.
+func makeSliceType(itemType Argument) (reflect.Type, error) {
+	var itemReflectedType reflect.Type
+	switch itemType.Type {
+	case IntType:
+		itemReflectedType = reflect.TypeOf(int(0))
+	case StringType:
+		itemReflectedType = reflect.TypeOf("")
+	case BoolType:
+		itemReflectedType = reflect.TypeOf(false)
+	case SliceType:
+		subItemType, err := makeSliceType(*itemType.ItemType)
+		if err != nil {
+			return nil, err
+		}
+		itemReflectedType = subItemType
+	default:
+		return nil, fmt.Errorf("invalid type when constructing guessed slice out: %v", itemType.Type)
+	}
+
+	if itemType.Pointer {
+		itemReflectedType = reflect.PtrTo(itemReflectedType)
+	}
+
+	return reflect.SliceOf(itemReflectedType), nil
+}
+
+// guessType takes an educated guess about the type of the next field. If allowSlice
+// is false, it will not guess slices. It's less efficient than parsing with actual
+// type information, since we need to allocate to peek ahead full tokens, and the scanner
+// only allows peeking ahead one character.
+func guessType(scanner *sc.Scanner, raw string, allowSlice bool) *Argument { + if allowSlice { + maybeItem := guessType(scanner, raw, false) + + subRaw := raw[scanner.Pos().Offset:] + subScanner := parserScanner(subRaw, scanner.Error) + + var tok rune + for tok = subScanner.Scan(); tok != ',' && tok != sc.EOF && tok != ';'; tok = subScanner.Scan() { + // wait till we get something interesting + } + + // semicolon means it's a legacy slice + if tok == ';' { + return &Argument{ + Type: SliceType, + ItemType: maybeItem, + } + } + + return maybeItem + } + + // first, try the easy case -- quoted strings strings + hint := scanner.Peek() + switch hint { + case '"', '\'', '`': + return &Argument{Type: StringType} + } + + // everything else needs a duplicate scanner to scan properly + // (so we don't consume our scanner tokens until we actually + // go to use this -- Go doesn't like scanners that can be rewound). + subRaw := raw[scanner.Pos().Offset:] + subScanner := parserScanner(subRaw, scanner.Error) + + // next, check for slices + if hint == '{' { + subScanner.Scan() + return &Argument{ + Type: SliceType, + ItemType: guessType(subScanner, subRaw, false), + } + } + + // then, bools... + probablyString := false + if hint == 't' || hint == 'f' { + // maybe a bool + if nextTok := subScanner.Scan(); nextTok == sc.Ident { + switch subScanner.TokenText() { + case "true", "false": + // definitely a bool + return &Argument{Type: BoolType} + } + // probably a string + probablyString = true + } else { + // we shouldn't ever get here + scanner.Error(scanner, fmt.Sprintf("got a token (%q) that looked like an ident, but was not", scanner.TokenText())) + return &Argument{Type: InvalidType} + } + } + + if !probablyString { + if nextTok := subScanner.Scan(); nextTok == sc.Int { + return &Argument{Type: IntType} + } + } + + // otherwise assume bare strings + return &Argument{Type: StringType} +} + +// parseString parses either of the two accepted string forms (quoted, or bare tokens). +func (a *Argument) parseString(scanner *sc.Scanner, raw string, out reflect.Value) { + // strings are a bit weird -- the "easy" case is quoted strings (tokenized as strings), + // the "hard" case (present for backwards compat) is a bare sequence of tokens that aren't + // a comma. + tok := scanner.Scan() + if tok == sc.String || tok == sc.RawString { + // the easy case + val, err := strconv.Unquote(scanner.TokenText()) + if err != nil { + scanner.Error(scanner, fmt.Sprintf("unable to parse string: %v", err)) + return + } + castAndSet(out, reflect.ValueOf(val)) + return + } + + // the "hard" case -- bare tokens not including ',' (the argument + // separator), ';' (the slice separator), or '}' (delimitted slice + // ender) + startPos := scanner.Position.Offset + for hint := scanner.Peek(); hint != ',' && hint != ';' && hint != '}' && hint != sc.EOF; hint = scanner.Peek() { + // skip this token + scanner.Scan() + } + endPos := scanner.Position.Offset + len(scanner.TokenText()) + castAndSet(out, reflect.ValueOf(raw[startPos:endPos])) +} + +// parseSlice parses either of the two slice forms (curly-brace-delimitted and semicolon-separated). 
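+//
+// For illustration only (a hypothetical marker name and values, not part of
+// the upstream file), the two accepted slice forms look like:
+//
+//	+example:marker:items={a, b, c}
+//	+example:marker:items=a;b;c
+//
+// The curly-brace form is preferred; the semicolon form is kept for
+// backwards compatibility with older markers and does not nest.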
+func (a *Argument) parseSlice(scanner *sc.Scanner, raw string, out reflect.Value) { + // slices have two supported formats, like string: + // - `{val, val, val}` (preferred) + // - `val;val;val` (legacy) + resSlice := reflect.Zero(out.Type()) + elem := reflect.Indirect(reflect.New(out.Type().Elem())) + + // preferred case + if scanner.Peek() == '{' { + // NB(directxman12): supporting delimitted slices in bare slices + // would require an extra look-ahead here :-/ + + scanner.Scan() // skip '{' + for hint := scanner.Peek(); hint != '}' && hint != sc.EOF; hint = scanner.Peek() { + a.ItemType.parse(scanner, raw, elem, true) + resSlice = reflect.Append(resSlice, elem) + tok := scanner.Peek() + if tok == '}' { + break + } + if !expect(scanner, ',', "comma") { + return + } + } + if !expect(scanner, '}', "close curly brace") { + return + } + castAndSet(out, resSlice) + return + } + + // legacy case + for hint := scanner.Peek(); hint != ',' && hint != '}' && hint != sc.EOF; hint = scanner.Peek() { + a.ItemType.parse(scanner, raw, elem, true) + resSlice = reflect.Append(resSlice, elem) + tok := scanner.Peek() + if tok == ',' || tok == '}' || tok == sc.EOF { + break + } + scanner.Scan() + if tok != ';' { + scanner.Error(scanner, fmt.Sprintf("expected comma, got %q", scanner.TokenText())) + return + } + } + castAndSet(out, resSlice) +} + +// parse functions like Parse, except that it allows passing down whether or not we're +// already in a slice, to avoid duplicate legacy slice detection for AnyType +func (a *Argument) parse(scanner *sc.Scanner, raw string, out reflect.Value, inSlice bool) { + // nolint:gocyclo + if a.Type == InvalidType { + scanner.Error(scanner, fmt.Sprintf("cannot parse invalid type")) + return + } + if a.Pointer { + out.Set(reflect.New(out.Type().Elem())) + out = reflect.Indirect(out) + } + switch a.Type { + case RawType: + // raw consumes everything else + castAndSet(out, reflect.ValueOf(raw[scanner.Pos().Offset:])) + // consume everything else + for tok := scanner.Scan(); tok != sc.EOF; tok = scanner.Scan() { + } + case IntType: + if !expect(scanner, sc.Int, "integer") { + return + } + // TODO(directxman12): respect the size when parsing + val, err := strconv.Atoi(scanner.TokenText()) + if err != nil { + scanner.Error(scanner, fmt.Sprintf("unable to parse integer: %v", err)) + return + } + castAndSet(out, reflect.ValueOf(val)) + case StringType: + // strings are a bit weird -- the "easy" case is quoted strings (tokenized as strings), + // the "hard" case (present for backwards compat) is a bare sequence of tokens that aren't + // a comma. 
+ a.parseString(scanner, raw, out) + case BoolType: + if !expect(scanner, sc.Ident, "true or false") { + return + } + switch scanner.TokenText() { + case "true": + castAndSet(out, reflect.ValueOf(true)) + case "false": + castAndSet(out, reflect.ValueOf(false)) + default: + scanner.Error(scanner, fmt.Sprintf("expected true or false, got %q", scanner.TokenText())) + return + } + case AnyType: + guessedType := guessType(scanner, raw, !inSlice) + newOut := out + if guessedType.Type == SliceType { + // we need to be able to construct the right element types, below + // in parse, so construct a concretely-typed value to use as "out" + newType, err := makeSliceType(*guessedType.ItemType) + if err != nil { + scanner.Error(scanner, err.Error()) + return + } + newOut = reflect.Indirect(reflect.New(newType)) + } + if !newOut.CanSet() { + panic("at the disco") + } + guessedType.Parse(scanner, raw, newOut) + castAndSet(out, newOut) + case SliceType: + // slices have two supported formats, like string: + // - `{val, val, val}` (preferred) + // - `val;val;val` (legacy) + a.parseSlice(scanner, raw, out) + } +} + +// Parse attempts to consume the argument from the given scanner (based on the given +// raw input as well for collecting ranges of content), and places the output value +// in the given reflect.Value. Errors are reported via the given scanner. +func (a *Argument) Parse(scanner *sc.Scanner, raw string, out reflect.Value) { + a.parse(scanner, raw, out, false) +} + +// ArgumentFromType constructs an Argument by examining the given +// raw reflect.Type. It can construct arguments from the Go types +// corresponding to any of the types listed in ArgumentType. +func ArgumentFromType(rawType reflect.Type) (Argument, error) { + if rawType == rawArgsType { + return Argument{ + Type: RawType, + }, nil + } + + if rawType == interfaceType { + return Argument{ + Type: AnyType, + }, nil + } + + arg := Argument{} + if rawType.Kind() == reflect.Ptr { + rawType = rawType.Elem() + arg.Pointer = true + arg.Optional = true + } + + switch rawType.Kind() { + case reflect.String: + arg.Type = StringType + case reflect.Int, reflect.Int32: // NB(directxman12): all ints in kubernetes are int32, so explicitly support that + arg.Type = IntType + case reflect.Bool: + arg.Type = BoolType + case reflect.Slice: + arg.Type = SliceType + itemType, err := ArgumentFromType(rawType.Elem()) + if err != nil { + return Argument{}, fmt.Errorf("bad slice item type: %v", err) + } + arg.ItemType = &itemType + default: + return Argument{}, fmt.Errorf("type has unsupported kind %s", rawType.Kind()) + } + + return arg, nil +} + +// TargetType describes which kind of node a given marker is associated with. +type TargetType int + +const ( + // DescribesPackage indicates that a marker is associated with a package. + DescribesPackage TargetType = iota + // DescribesType indicates that a marker is associated with a type declaration. + DescribesType + // DescribesField indicates that a marker is associated with a struct field. + DescribesField +) + +func (t TargetType) String() string { + switch t { + case DescribesPackage: + return "package" + case DescribesType: + return "type" + case DescribesField: + return "field" + default: + return "(unknown)" + } +} + +// Definition is a parsed definition of a marker. +type Definition struct { + // Output is the deserialized Go type of the marker. + Output reflect.Type + // Name is the marker's name. + Name string + // Target indicates which kind of node this marker can be associated with. 
+ Target TargetType + // Fields lists out the types of each field that this marker has, by + // argument name as used in the marker (if the output type isn't a struct, + // it'll have a single, blank field name). This only lists exported fields, + // (as per reflection rules). + Fields map[string]Argument + // FieldNames maps argument names (as used in the marker) to struct field name + // in the output type. + FieldNames map[string]string + // Strict indicates that this definition should error out when parsing if + // not all non-optional fields were seen. + Strict bool +} + +// AnonymousField indicates that the definition has one field, +// (actually the original object), and thus the field +// doesn't get named as part of the name. +func (d *Definition) AnonymousField() bool { + if len(d.Fields) != 1 { + return false + } + _, hasAnonField := d.Fields[""] + return hasAnonField +} + +// Empty indicates that this definition has no fields. +func (d *Definition) Empty() bool { + return len(d.Fields) == 0 +} + +// argumentInfo returns information about an argument field as the marker parser's field loader +// would see it. This can be useful if you have to interact with marker definition structs +// externally (e.g. at compile time). +func argumentInfo(fieldName string, tag reflect.StructTag) (argName string, optionalOpt bool) { + argName = lowerCamelCase(fieldName) + markerTag, tagSpecified := tag.Lookup("marker") + markerTagParts := strings.Split(markerTag, ",") + if tagSpecified && markerTagParts[0] != "" { + // allow overriding to support legacy cases where we don't follow camelCase conventions + argName = markerTagParts[0] + } + optionalOpt = false + for _, tagOption := range markerTagParts[1:] { + switch tagOption { + case "optional": + optionalOpt = true + } + } + + return argName, optionalOpt +} + +// loadFields uses reflection to populate argument information from the Output type. +func (d *Definition) loadFields() error { + if d.Fields == nil { + d.Fields = make(map[string]Argument) + d.FieldNames = make(map[string]string) + } + if d.Output.Kind() != reflect.Struct { + // anonymous field type + argType, err := ArgumentFromType(d.Output) + if err != nil { + return err + } + d.Fields[""] = argType + d.FieldNames[""] = "" + return nil + } + + for i := 0; i < d.Output.NumField(); i++ { + field := d.Output.Field(i) + if field.PkgPath != "" { + // as per the reflect package docs, pkgpath is empty for exported fields, + // so non-empty package path means a private field, which we should skip + continue + } + argName, optionalOpt := argumentInfo(field.Name, field.Tag) + + argType, err := ArgumentFromType(field.Type) + if err != nil { + return fmt.Errorf("unable to extract type information for field %q: %v", field.Name, err) + } + + if argType.Type == RawType { + return fmt.Errorf("RawArguments must be the direct type of a marker, and not a field") + } + + argType.Optional = optionalOpt || argType.Optional + + d.Fields[argName] = argType + d.FieldNames[argName] = field.Name + } + + return nil +} + +// parserScanner makes a new scanner appropriate for use in parsing definitions and arguments. 
+func parserScanner(raw string, err func(*sc.Scanner, string)) *sc.Scanner { + scanner := &sc.Scanner{} + scanner.Init(bytes.NewBufferString(raw)) + scanner.Mode = sc.ScanIdents | sc.ScanInts | sc.ScanStrings | sc.ScanRawStrings | sc.SkipComments + scanner.Error = err + + return scanner +} + +// Parse uses the type information in this Definition to parse the given +// raw marker in the form `+a:b:c=arg,d=arg` into an output object of the +// type specified in the definition. +func (d *Definition) Parse(rawMarker string) (interface{}, error) { + name, anonName, fields := splitMarker(rawMarker) + + out := reflect.Indirect(reflect.New(d.Output)) + + // if we're a not a struct or have no arguments, treat the full `a:b:c` as the name, + // otherwise, treat `c` as a field name, and `a:b` as the marker name. + if !d.AnonymousField() && !d.Empty() && len(anonName) >= len(name)+1 { + fields = anonName[len(name)+1:] + "=" + fields + } + + var errs []error + scanner := parserScanner(fields, func(_ *sc.Scanner, msg string) { + errs = append(errs, errors.New(msg)) + }) + + // TODO(directxman12): strict parsing where we error out if certain fields aren't optional + seen := make(map[string]struct{}, len(d.Fields)) + if d.AnonymousField() && scanner.Peek() != sc.EOF { + // no need for trying to parse field names if we're not a struct + field := d.Fields[""] + field.Parse(scanner, fields, out) + seen[""] = struct{}{} // mark as seen for strict definitions + } else if !d.Empty() && scanner.Peek() != sc.EOF { + // if we expect *and* actually have arguments passed + for { + // parse the argument name + if !expect(scanner, sc.Ident, "argument name") { + break + } + argName := scanner.TokenText() + if !expect(scanner, '=', "equals") { + break + } + + // make sure we know the field + fieldName, known := d.FieldNames[argName] + if !known { + scanner.Error(scanner, fmt.Sprintf("unknown argument %q", argName)) + break + } + fieldType, known := d.Fields[argName] + if !known { + scanner.Error(scanner, fmt.Sprintf("unknown argument %q", argName)) + break + } + seen[argName] = struct{}{} // mark as seen for strict definitions + + // parse the field value + fieldVal := out.FieldByName(fieldName) + if !fieldVal.CanSet() { + scanner.Error(scanner, fmt.Sprintf("cannot set field %q (might not exist)", fieldName)) + break + } + fieldType.Parse(scanner, fields, fieldVal) + + if len(errs) > 0 { + break + } + + if scanner.Peek() == sc.EOF { + break + } + if !expect(scanner, ',', "comma") { + break + } + } + } + + if tok := scanner.Scan(); tok != sc.EOF { + scanner.Error(scanner, fmt.Sprintf("extra arguments provided: %q", fields[scanner.Position.Offset:])) + } + + if d.Strict { + for argName, arg := range d.Fields { + if _, wasSeen := seen[argName]; !wasSeen && !arg.Optional { + scanner.Error(scanner, fmt.Sprintf("missing argument %q", argName)) + } + } + } + + return out.Interface(), loader.MaybeErrList(errs) +} + +// MakeDefinition constructs a definition from a name, type, and the output type. +// All such definitions are strict by default. If a struct is passed as the output +// type, its public fields will automatically be populated into Fields (and similar +// fields in Definition). Other values will have a single, empty-string-named Fields +// entry. 
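+//
+// For illustration only (the marker name "example:maxLen" and the maxLen
+// struct are hypothetical, not part of the upstream file), a struct-backed
+// definition could be built and used roughly like this:
+//
+//	type maxLen struct{ Length int }
+//
+//	def, err := MakeDefinition("example:maxLen", DescribesField, maxLen{})
+//	if err != nil {
+//		// handle the error
+//	}
+//	val, err := def.Parse("+example:maxLen:length=10")
+//	// val is maxLen{Length: 10}; the argument name "length" is the
+//	// lowerCamelCase form of the struct field Length.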
+func MakeDefinition(name string, target TargetType, output interface{}) (*Definition, error) { + def := &Definition{ + Name: name, + Target: target, + Output: reflect.TypeOf(output), + Strict: true, + } + + if err := def.loadFields(); err != nil { + return nil, err + } + + return def, nil +} + +// splitMarker takes a marker in the form of `+a:b:c=arg,d=arg` and splits it +// into the name (`a:b`), the name if it's not a struct (`a:b:c`), and the parts +// that are definitely fields (`arg,d=arg`). +func splitMarker(raw string) (name string, anonymousName string, restFields string) { + raw = raw[1:] // get rid of the leading '+' + nameFieldParts := strings.SplitN(raw, "=", 2) + if len(nameFieldParts) == 1 { + return nameFieldParts[0], nameFieldParts[0], "" + } + anonymousName = nameFieldParts[0] + name = anonymousName + restFields = nameFieldParts[1] + + nameParts := strings.Split(name, ":") + if len(nameParts) > 1 { + name = strings.Join(nameParts[:len(nameParts)-1], ":") + } + return name, anonymousName, restFields +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go new file mode 100644 index 0000000000..b0124630f8 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "fmt" + "sync" +) + +// Registry keeps track of registered definitions, and allows for easy lookup. +// It's thread-safe, and the zero-value can be safely used. +type Registry struct { + forPkg map[string]*Definition + forType map[string]*Definition + forField map[string]*Definition + helpFor map[*Definition]*DefinitionHelp + + mu sync.RWMutex + initOnce sync.Once +} + +func (r *Registry) init() { + r.initOnce.Do(func() { + if r.forPkg == nil { + r.forPkg = make(map[string]*Definition) + } + if r.forType == nil { + r.forType = make(map[string]*Definition) + } + if r.forField == nil { + r.forField = make(map[string]*Definition) + } + if r.helpFor == nil { + r.helpFor = make(map[*Definition]*DefinitionHelp) + } + }) +} + +// Define defines a new marker with the given name, target, and output type. +// It's a shortcut around +// r.Register(MakeDefinition(name, target, obj)) +func (r *Registry) Define(name string, target TargetType, obj interface{}) error { + def, err := MakeDefinition(name, target, obj) + if err != nil { + return err + } + return r.Register(def) +} + +// Register registers the given marker definition with this registry for later lookup. 
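+//
+// For illustration only (hypothetical marker name and output struct, not part
+// of the upstream file), a zero-value Registry can be used directly:
+//
+//	type maxLen struct{ Length int }
+//
+//	var reg Registry
+//	if err := reg.Define("example:maxLen", DescribesField, maxLen{}); err != nil {
+//		// handle the error
+//	}
+//	def := reg.Lookup("+example:maxLen:length=10", DescribesField)
+//	// def is the definition registered above; Lookup splits off the leading
+//	// '+' and the argument portion before matching the name.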
+func (r *Registry) Register(def *Definition) error { + r.init() + + r.mu.Lock() + defer r.mu.Unlock() + + switch def.Target { + case DescribesPackage: + r.forPkg[def.Name] = def + case DescribesType: + r.forType[def.Name] = def + case DescribesField: + r.forField[def.Name] = def + default: + return fmt.Errorf("unknown target type %v", def.Target) + } + return nil +} + +// AddHelp stores the given help in the registry, marking it as associated with +// the given definition. +func (r *Registry) AddHelp(def *Definition, help *DefinitionHelp) { + r.init() + + r.mu.Lock() + defer r.mu.Unlock() + + r.helpFor[def] = help +} + +// Lookup fetches the definition corresponding to the given name and target type. +func (r *Registry) Lookup(name string, target TargetType) *Definition { + r.init() + + r.mu.RLock() + defer r.mu.RUnlock() + + switch target { + case DescribesPackage: + return tryAnonLookup(name, r.forPkg) + case DescribesType: + return tryAnonLookup(name, r.forType) + case DescribesField: + return tryAnonLookup(name, r.forField) + default: + return nil + } +} + +// HelpFor fetches the help for a given definition, if present. +func (r *Registry) HelpFor(def *Definition) *DefinitionHelp { + r.init() + + r.mu.RLock() + defer r.mu.RUnlock() + + return r.helpFor[def] +} + +// AllDefinitions returns all marker definitions known to this registry. +func (r *Registry) AllDefinitions() []*Definition { + res := make([]*Definition, 0, len(r.forPkg)+len(r.forType)+len(r.forField)) + for _, def := range r.forPkg { + res = append(res, def) + } + for _, def := range r.forType { + res = append(res, def) + } + for _, def := range r.forField { + res = append(res, def) + } + return res +} + +// tryAnonLookup tries looking up the given marker as both an struct-based +// marker and an anonymous marker, returning whichever format matches first, +// preferring the longer (anonymous) name in case of conflicts. +func tryAnonLookup(name string, defs map[string]*Definition) *Definition { + // NB(directxman12): we look up anonymous names first to work with + // legacy style marker definitions that have a namespaced approach + // (e.g. deepcopy-gen, which uses `+k8s:deepcopy-gen=foo,bar` *and* + // `+k8s.io:deepcopy-gen:interfaces=foo`). + name, anonName, _ := splitMarker(name) + if def, exists := defs[anonName]; exists { + return def + } + + return defs[name] +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/patch/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go similarity index 54% rename from vendor/sigs.k8s.io/controller-runtime/pkg/patch/doc.go rename to vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go index 0e1d2a9f56..a9160c3c2f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/patch/doc.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,20 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* -Package patch provides method to calculate JSON patch between 2 k8s objects. - -Calculate JSON patch +package markers - oldDeployment := appsv1.Deployment{ - // some fields - } - newDeployment := appsv1.Deployment{ - // some different fields - } - patch, err := NewJSONPatch(oldDeployment, newDeployment) +// Must panics on errors creating definitions. 
+func Must(def *Definition, err error) *Definition { if err != nil { - // handle error + panic(err) } -*/ -package patch + return def +} + +// RegisterAll attempts to register all definitions against the given registry, +// stopping and returning if an error occurs. +func RegisterAll(reg *Registry, defs ...*Definition) error { + for _, def := range defs { + if err := reg.Register(def); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go new file mode 100644 index 0000000000..a9b4c98af5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go @@ -0,0 +1,191 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "go/ast" + "go/token" + "reflect" + "strings" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// extractDoc extracts documentation from the given node, skipping markers +// in the godoc and falling back to the decl if necessary (for single-line decls). +func extractDoc(node ast.Node, decl *ast.GenDecl) string { + var docs *ast.CommentGroup + switch docced := node.(type) { + case *ast.Field: + docs = docced.Doc + case *ast.File: + docs = docced.Doc + case *ast.GenDecl: + docs = docced.Doc + case *ast.TypeSpec: + docs = docced.Doc + // type Ident expr expressions get docs attached to the decl, + // so check for that case (missing Lparen == single line type decl) + if docs == nil && decl.Lparen == token.NoPos { + docs = decl.Doc + } + } + + if docs == nil { + return "" + } + + // filter out markers + var outGroup ast.CommentGroup + outGroup.List = make([]*ast.Comment, 0, len(docs.List)) + for _, comment := range docs.List { + if isMarkerComment(comment.Text) { + continue + } + outGroup.List = append(outGroup.List, comment) + } + + // split lines, and re-join together as a single + // paragraph, respecting double-newlines as + // paragraph markers. + outLines := strings.Split(outGroup.Text(), "\n") + if outLines[len(outLines)-1] == "" { + // chop off the extraneous last part + outLines = outLines[:len(outLines)-1] + } + // respect double-newline meaning actual newline + for i, line := range outLines { + if line == "" { + outLines[i] = "\n" + } + } + return strings.Join(outLines, " ") +} + +// PackageMarkers collects all the package-level marker values for the given package. +func PackageMarkers(col *Collector, pkg *loader.Package) (MarkerValues, error) { + markers, err := col.MarkersInPackage(pkg) + if err != nil { + return nil, err + } + res := make(MarkerValues) + for _, file := range pkg.Syntax { + fileMarkers := markers[file] + for name, vals := range fileMarkers { + res[name] = append(res[name], vals...) + } + } + + return res, nil +} + +// FieldInfo contains marker values and commonly used information for a struct field. 
+type FieldInfo struct { + // Name is the name of the field (or "" for embedded fields) + Name string + // Doc is the Godoc of the field, pre-processed to remove markers and joine + // single newlines together. + Doc string + // Tag struct tag associated with this field (or "" if non existed). + Tag reflect.StructTag + + // Markers are all registered markers associated with this field. + Markers MarkerValues + + // RawField is the raw, underlying field AST object that this field represents. + RawField *ast.Field +} + +// TypeInfo contains marker values and commonly used information for a type declaration. +type TypeInfo struct { + // Name is the name of the type. + Name string + // Doc is the Godoc of the type, pre-processed to remove markers and joine + // single newlines together. + Doc string + + // Markers are all registered markers associated with the type. + Markers MarkerValues + + // Fields are all the fields associated with the type, if it's a struct. + // (if not, Fields will be nil). + Fields []FieldInfo + + // RawDecl contains the raw GenDecl that the type was declared as part of. + RawDecl *ast.GenDecl + // RawSpec contains the raw Spec that declared this type. + RawSpec *ast.TypeSpec + // RawFile contains the file in which this type was declared. + RawFile *ast.File +} + +// TypeCallback is a callback called for each type declaration in a package. +type TypeCallback func(info *TypeInfo) + +// EachType collects all markers, then calls the given callback for each type declaration in a package. +// Each individual spec is considered separate, so +// +// type ( +// Foo string +// Bar int +// Baz struct{} +// ) +// +// yields three calls to the callback. +func EachType(col *Collector, pkg *loader.Package, cb TypeCallback) error { + markers, err := col.MarkersInPackage(pkg) + if err != nil { + return err + } + + loader.EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) { + var fields []FieldInfo + if structSpec, isStruct := spec.Type.(*ast.StructType); isStruct { + for _, field := range structSpec.Fields.List { + for _, name := range field.Names { + fields = append(fields, FieldInfo{ + Name: name.Name, + Doc: extractDoc(field, nil), + Tag: loader.ParseAstTag(field.Tag), + Markers: markers[field], + RawField: field, + }) + } + if field.Names == nil { + fields = append(fields, FieldInfo{ + Doc: extractDoc(field, nil), + Tag: loader.ParseAstTag(field.Tag), + Markers: markers[field], + RawField: field, + }) + } + } + } + + cb(&TypeInfo{ + Name: spec.Name.Name, + Markers: markers[spec], + Doc: extractDoc(spec, decl), + Fields: fields, + RawDecl: decl, + RawSpec: spec, + RawFile: file, + }) + }) + + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go deleted file mode 100644 index 5800fbeb84..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/manifests.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package rbac - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/ghodss/yaml" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -// ManifestOptions represent options for generating the RBAC manifests. -type ManifestOptions struct { - InputDir string - OutputDir string - RoleFile string - BindingFile string - Name string - ServiceAccount string - Namespace string - Labels map[string]string -} - -// SetDefaults sets up the default options for RBAC Manifest generator. -func (o *ManifestOptions) SetDefaults() { - o.Name = "manager" - o.InputDir = filepath.Join(".", "pkg") - o.OutputDir = filepath.Join(".", "config", "rbac") - o.ServiceAccount = "default" - o.Namespace = "system" -} - -// RoleName returns the RBAC role name to be used in the manifests. -func (o *ManifestOptions) RoleName() string { - return o.Name + "-role" -} - -// RoleFileName returns the name of the manifest file to use for the role. -func (o *ManifestOptions) RoleFileName() string { - if len(o.RoleFile) == 0 { - return "rbac_role.yaml" - } - // TODO: validate file name - return o.RoleFile -} - -// RoleBindingName returns the RBAC role binding name to be used in the manifests. -func (o *ManifestOptions) RoleBindingName() string { - return o.Name + "-rolebinding" -} - -// RoleBindingFileName returns the name of the manifest file to use for the role binding. -func (o *ManifestOptions) RoleBindingFileName() string { - if len(o.BindingFile) == 0 { - return "rbac_role_binding.yaml" - } - // TODO: validate file name - return o.BindingFile -} - -// Validate validates the input options. -func (o *ManifestOptions) Validate() error { - if _, err := os.Stat(o.InputDir); err != nil { - return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err) - } - return nil -} - -// Generate generates RBAC manifests by parsing the RBAC annotations in Go source -// files specified in the input directory. 
-func Generate(o *ManifestOptions) error { - if err := o.Validate(); err != nil { - return err - } - - ops := parserOptions{ - rules: []rbacv1.PolicyRule{}, - } - err := general.ParseDir(o.InputDir, ops.parseAnnotation) - if err != nil { - return fmt.Errorf("failed to parse the input dir %v", err) - } - if len(ops.rules) == 0 { - return nil - } - roleManifest, err := getClusterRoleManifest(ops.rules, o) - if err != nil { - return fmt.Errorf("failed to generate role manifest %v", err) - } - - roleBindingManifest, err := getClusterRoleBindingManifest(o) - if err != nil { - return fmt.Errorf("failed to generate role binding manifests %v", err) - } - - err = os.MkdirAll(o.OutputDir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create output dir %v", err) - } - roleManifestFile := filepath.Join(o.OutputDir, o.RoleFileName()) - if err := ioutil.WriteFile(roleManifestFile, roleManifest, 0666); err != nil { - return fmt.Errorf("failed to write role manifest YAML file %v", err) - } - - roleBindingManifestFile := filepath.Join(o.OutputDir, o.RoleBindingFileName()) - if err := ioutil.WriteFile(roleBindingManifestFile, roleBindingManifest, 0666); err != nil { - return fmt.Errorf("failed to write role manifest YAML file %v", err) - } - return nil -} - -func getClusterRoleManifest(rules []rbacv1.PolicyRule, o *ManifestOptions) ([]byte, error) { - role := rbacv1.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRole", - APIVersion: "rbac.authorization.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.RoleName(), - Labels: o.Labels, - }, - Rules: rules, - } - return yaml.Marshal(role) -} - -func getClusterRoleBindingManifest(o *ManifestOptions) ([]byte, error) { - rolebinding := &rbacv1.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "rbac.authorization.k8s.io/v1", - Kind: "ClusterRoleBinding", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.RoleBindingName(), - Labels: o.Labels, - }, - Subjects: []rbacv1.Subject{ - { - Name: o.ServiceAccount, - Namespace: o.Namespace, - Kind: "ServiceAccount", - }, - }, - RoleRef: rbacv1.RoleRef{ - Name: o.RoleName(), - Kind: "ClusterRole", - APIGroup: "rbac.authorization.k8s.io", - }, - } - return yaml.Marshal(rolebinding) -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go index d28cfd9e67..88ae805dd2 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,69 +15,244 @@ limitations under the License. */ // Package rbac contain libraries for generating RBAC manifests from RBAC -// annotations in Go source files. +// markers in Go source files. +// +// The markers take the form: +// +// +kubebuilder:rbac:groups=,resources=,verbs=,urls= package rbac import ( - "log" + "fmt" + "sort" "strings" rbacv1 "k8s.io/api/rbac/v1" - "sigs.k8s.io/controller-tools/pkg/internal/general" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +var ( + // RuleDefinition is a marker for defining RBAC rules. + // Call ToRule on the value to get a Kubernetes RBAC policy rule. 
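+	//
+	// For illustration only (the groups, resources and verbs below are
+	// hypothetical, not part of the upstream file), a controller source file
+	// could carry a comment such as
+	//
+	//	+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch
+	//
+	// which this definition parses into a Rule with Groups=["apps"],
+	// Resources=["deployments"] and Verbs=["get", "list", "watch"].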
+ RuleDefinition = markers.Must(markers.MakeDefinition("kubebuilder:rbac", markers.DescribesPackage, Rule{})) ) -type parserOptions struct { - rules []rbacv1.PolicyRule +// +controllertools:marker:generateHelp:category=RBAC + +// Rule specifies an RBAC rule to all access to some resources or non-resource URLs. +type Rule struct { + // Groups specifies the API groups that this rule encompasses. + Groups []string `marker:",optional"` + // Resources specifies the API resources that this rule encompasses. + Resources []string `marker:",optional"` + // Verbs specifies the (lowercase) kubernetes API verbs that this rule encompasses. + Verbs []string + // URL specifies the non-resource URLs that this rule encompasses. + URLs []string `marker:"urls,optional"` + // Namespace specifies the scope of the Rule. + // If not set, the Rule belongs to the generated ClusterRole. + // If set, the Rule belongs to a Role, whose namespace is specified by this field. + Namespace string `marker:",optional"` } -// parseAnnotation parses RBAC annotations -func (o *parserOptions) parseAnnotation(commentText string) error { - for _, comment := range strings.Split(commentText, "\n") { - comment := strings.TrimSpace(comment) - if strings.HasPrefix(comment, "+rbac") { - if ann := general.GetAnnotation(comment, "rbac"); ann != "" { - o.rules = append(o.rules, parseRBACTag(ann)) - } +// ruleKey represents the resources and non-resources a Rule applies. +type ruleKey struct { + Groups string + Resources string + URLs string +} + +func (key ruleKey) String() string { + return fmt.Sprintf("%s + %s + %s", key.Groups, key.Resources, key.URLs) +} + +// ruleKeys implements sort.Interface +type ruleKeys []ruleKey + +func (keys ruleKeys) Len() int { return len(keys) } +func (keys ruleKeys) Swap(i, j int) { keys[i], keys[j] = keys[j], keys[i] } +func (keys ruleKeys) Less(i, j int) bool { return keys[i].String() < keys[j].String() } + +// key normalizes the Rule and returns a ruleKey object. +func (r *Rule) key() ruleKey { + r.normalize() + return ruleKey{ + Groups: strings.Join(r.Groups, "&"), + Resources: strings.Join(r.Resources, "&"), + URLs: strings.Join(r.URLs, "&"), + } +} + +// addVerbs adds new verbs into a Rule. +// The duplicates in `r.Verbs` will be removed, and then `r.Verbs` will be sorted. +func (r *Rule) addVerbs(verbs []string) { + r.Verbs = removeDupAndSort(append(r.Verbs, verbs...)) +} + +// normalize removes duplicates from each field of a Rule, and sorts each field. +func (r *Rule) normalize() { + r.Groups = removeDupAndSort(r.Groups) + r.Resources = removeDupAndSort(r.Resources) + r.Verbs = removeDupAndSort(r.Verbs) + r.URLs = removeDupAndSort(r.URLs) +} + +// removeDupAndSort removes duplicates in strs, sorts the items, and returns a +// new slice of strings. +func removeDupAndSort(strs []string) []string { + set := make(map[string]bool) + for _, str := range strs { + if _, ok := set[str]; !ok { + set[str] = true } - if strings.HasPrefix(comment, "+kubebuilder:rbac") { - if ann := general.GetAnnotation(comment, "kubebuilder:rbac"); ann != "" { - o.rules = append(o.rules, parseRBACTag(ann)) - } + } + + var result []string + for str := range set { + result = append(result, str) + } + sort.Strings(result) + return result +} + +// ToRule converts this rule to its Kubernetes API form. 
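+//
+// For illustration only (hypothetical values, not part of the upstream file):
+//
+//	r := Rule{Groups: []string{"core"}, Resources: []string{"pods"}, Verbs: []string{"get", "list"}}
+//	policyRule := r.ToRule()
+//	// policyRule.APIGroups is [""] ("core" is rewritten to the empty API
+//	// group), Resources is ["pods"], and Verbs is ["get", "list"].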
+func (r *Rule) ToRule() rbacv1.PolicyRule { + // fix the group names first, since letting people type "core" is nice + for i, group := range r.Groups { + if group == "core" { + r.Groups[i] = "" } } + return rbacv1.PolicyRule{ + APIGroups: r.Groups, + Verbs: r.Verbs, + Resources: r.Resources, + NonResourceURLs: r.URLs, + } +} + +// +controllertools:marker:generateHelp + +// Generator generates ClusterRole objects. +type Generator struct { + // RoleName sets the name of the generated ClusterRole. + RoleName string +} + +func (Generator) RegisterMarkers(into *markers.Registry) error { + if err := into.Register(RuleDefinition); err != nil { + return err + } + into.AddHelp(RuleDefinition, Rule{}.Help()) return nil } -// parseRBACTag parses the given RBAC annotation in to an RBAC PolicyRule. -// This is copied from Kubebuilder code. -func parseRBACTag(tag string) rbacv1.PolicyRule { - result := rbacv1.PolicyRule{} - for _, elem := range strings.Split(tag, ",") { - key, value, err := general.ParseKV(elem) +// GenerateRoles generate a slice of objs representing either a ClusterRole or a Role object +// The order of the objs in the returned slice is stable and determined by their namespaces. +func GenerateRoles(ctx *genall.GenerationContext, roleName string) ([]interface{}, error) { + rulesByNS := make(map[string][]*Rule) + for _, root := range ctx.Roots { + markerSet, err := markers.PackageMarkers(ctx.Collector, root) if err != nil { - log.Fatalf("// +kubebuilder:rbac: tags must be key value pairs. Expected "+ - "keys [groups=,resources=,verbs=] "+ - "Got string: [%s]", tag) + root.AddError(err) } - values := strings.Split(value, ";") - switch key { - case "groups": - normalized := []string{} - for _, v := range values { - if v == "core" { - normalized = append(normalized, "") - } else { - normalized = append(normalized, v) - } + + // group RBAC markers by namespace + for _, markerValue := range markerSet[RuleDefinition.Name] { + rule := markerValue.(Rule) + namespace := rule.Namespace + if _, ok := rulesByNS[namespace]; !ok { + rules := make([]*Rule, 0) + rulesByNS[namespace] = rules } - result.APIGroups = normalized - case "resources": - result.Resources = values - case "verbs": - result.Verbs = values - case "urls": - result.NonResourceURLs = values + rulesByNS[namespace] = append(rulesByNS[namespace], &rule) } } - return result + + // NormalizeRules merge Rule with the same ruleKey and sort the Rules + NormalizeRules := func(rules []*Rule) []rbacv1.PolicyRule { + ruleMap := make(map[ruleKey]*Rule) + // all the Rules having the same ruleKey will be merged into the first Rule + for _, rule := range rules { + key := rule.key() + if _, ok := ruleMap[key]; !ok { + ruleMap[key] = rule + continue + } + ruleMap[key].addVerbs(rule.Verbs) + } + + // sort the Rules in rules according to their ruleKeys + keys := make([]ruleKey, 0, len(ruleMap)) + for key := range ruleMap { + keys = append(keys, key) + } + sort.Sort(ruleKeys(keys)) + + var policyRules []rbacv1.PolicyRule + for _, key := range keys { + policyRules = append(policyRules, ruleMap[key].ToRule()) + + } + return policyRules + } + + // collect all the namespaces and sort them + var namespaces []string + for ns := range rulesByNS { + namespaces = append(namespaces, ns) + } + sort.Strings(namespaces) + + // process the items in rulesByNS by the order specified in `namespaces` to make sure that the Role order is stable + var objs []interface{} + for _, ns := range namespaces { + rules := rulesByNS[ns] + policyRules := NormalizeRules(rules) + if 
len(policyRules) == 0 { + continue + } + if ns == "" { + objs = append(objs, rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: rbacv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: policyRules, + }) + } else { + objs = append(objs, rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + Kind: "Role", + APIVersion: rbacv1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: ns, + }, + Rules: policyRules, + }) + } + } + + return objs, nil +} + +func (g Generator) Generate(ctx *genall.GenerationContext) error { + objs, err := GenerateRoles(ctx, g.RoleName) + if err != nil { + return err + } + + if len(objs) == 0 { + return nil + } + + return ctx.WriteYAML("role.yaml", objs...) } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go new file mode 100644 index 0000000000..f58a9ed940 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go @@ -0,0 +1,73 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package rbac + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "generates ClusterRole objects.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "RoleName": markers.DetailedHelp{ + Summary: "sets the name of the generated ClusterRole.", + Details: "", + }, + }, + } +} + +func (Rule) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "RBAC", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies an RBAC rule to all access to some resources or non-resource URLs.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Groups": markers.DetailedHelp{ + Summary: "specifies the API groups that this rule encompasses.", + Details: "", + }, + "Resources": markers.DetailedHelp{ + Summary: "specifies the API resources that this rule encompasses.", + Details: "", + }, + "Verbs": markers.DetailedHelp{ + Summary: "specifies the (lowercase) kubernetes API verbs that this rule encompasses.", + Details: "", + }, + "URLs": markers.DetailedHelp{ + Summary: "URL specifies the non-resource URLs that this rule encompasses.", + Details: "", + }, + "Namespace": markers.DetailedHelp{ + Summary: "specifies the scope of the Rule. If not set, the Rule belongs to the generated ClusterRole. 
If set, the Rule belongs to a Role, whose namespace is specified by this field.", + Details: "", + }, + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go b/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go deleted file mode 100644 index 9649913b38..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "io" - "log" - "os" - "path/filepath" - - "github.com/spf13/afero" -) - -// FileWriter is a io wrapper to write files -type FileWriter struct { - Fs afero.Fs -} - -// WriteCloser returns a WriteCloser to write to given path -func (fw *FileWriter) WriteCloser(path string) (io.Writer, error) { - if fw.Fs == nil { - fw.Fs = afero.NewOsFs() - } - dir := filepath.Dir(path) - err := fw.Fs.MkdirAll(dir, 0700) - if err != nil { - return nil, err - } - - fi, err := fw.Fs.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return nil, err - } - - return fi, nil -} - -// WriteFile write given content to the file path -func (fw *FileWriter) WriteFile(filePath string, content []byte) error { - if fw.Fs == nil { - fw.Fs = afero.NewOsFs() - } - f, err := fw.WriteCloser(filePath) - if err != nil { - return fmt.Errorf("failed to create %s: %v", filePath, err) - } - - if c, ok := f.(io.Closer); ok { - defer func() { - if err := c.Close(); err != nil { - log.Fatal(err) - } - }() - } - - _, err = f.Write(content) - if err != nil { - return fmt.Errorf("failed to write %s: %v", filePath, err) - } - - return nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go deleted file mode 100644 index 13bd1bca43..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/admission.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "errors" - "fmt" - "regexp" - "strings" - - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// admissionWebhook contains bits needed for generating a admissionWebhook Configuration -type admissionWebhook struct { - // name is the name of the webhook - name string - // typ is the webhook type, i.e. mutating, validating - typ webhookType - // path is the path this webhook will serve. 
- path string - // rules maps to the rules field in admissionregistrationv1beta1.admissionWebhook - rules []admissionregistrationv1beta1.RuleWithOperations - // failurePolicy maps to the failurePolicy field in admissionregistrationv1beta1.admissionWebhook - // This optional. If not set, will be defaulted to Ignore (fail-open) by the server. - // More details: https://github.com/kubernetes/api/blob/f5c295feaba2cbc946f0bbb8b535fc5f6a0345ee/admissionregistration/v1beta1/types.go#L144-L147 - failurePolicy *admissionregistrationv1beta1.FailurePolicyType - // namespaceSelector maps to the namespaceSelector field in admissionregistrationv1beta1.admissionWebhook - // This optional. - namespaceSelector *metav1.LabelSelector -} - -func (w *admissionWebhook) setDefaults() { - if len(w.path) == 0 { - if len(w.rules) == 0 || len(w.rules[0].Resources) == 0 { - // can't do defaulting, skip it. - return - } - if w.typ == mutatingWebhook { - w.path = "/mutate-" + w.rules[0].Resources[0] - } else if w.typ == validatingWebhook { - w.path = "/validate-" + w.rules[0].Resources[0] - } - } - if len(w.name) == 0 { - reg := regexp.MustCompile("[^a-zA-Z0-9]+") - processedPath := strings.ToLower(reg.ReplaceAllString(w.path, "")) - w.name = processedPath + ".example.com" - } -} - -var _ webhook = &admissionWebhook{} - -// GetType returns the type of the webhook. -func (w *admissionWebhook) GetType() webhookType { - return w.typ -} - -// Validate validates if the webhook is valid. -func (w *admissionWebhook) Validate() error { - if len(w.rules) == 0 { - return errors.New("field rules should not be empty") - } - if len(w.name) == 0 { - return errors.New("field name should not be empty") - } - if w.typ != mutatingWebhook && w.typ != validatingWebhook { - return fmt.Errorf("unsupported Type: %v, only mutatingWebhook and validatingWebhook are supported", w.typ) - } - if len(w.path) == 0 { - return errors.New("field path should not be empty") - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go deleted file mode 100644 index 838ab6c681..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/generator.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "errors" - "net" - "net/url" - "path" - "sort" - "strconv" - - "k8s.io/api/admissionregistration/v1beta1" - admissionregistration "k8s.io/api/admissionregistration/v1beta1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -type generatorOptions struct { - // webhooks maps a path to a webhoook. - webhooks map[string]webhook - - // port is the port number that the server will serve. - // It will be defaulted to 443 if unspecified. - port int32 - - // certDir is the directory that contains the server key and certificate. 
- certDir string - - // mutatingWebhookConfigName is the name that used for creating the MutatingWebhookConfiguration object. - mutatingWebhookConfigName string - // validatingWebhookConfigName is the name that used for creating the ValidatingWebhookConfiguration object. - validatingWebhookConfigName string - - // secret is the location for storing the certificate for the admission server. - // The server should have permission to create a secret in the namespace. - secret *apitypes.NamespacedName - - // service is a k8s service fronting the webhook server pod(s). - // One and only one of service and host can be set. - // This maps to field .Webhooks.ClientConfig.Service - // https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L260 - service *service - // host is the host name of .Webhooks.ClientConfig.URL - // https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L250 - // One and only one of service and host can be set. - // If neither service nor host is unspecified, host will be defaulted to "localhost". - host *string -} - -// service contains information for creating a Service -type service struct { - // name of the Service - name string - // namespace of the Service - namespace string - // selectors is the selector of the Service. - // This must select the pods that runs this webhook server. - selectors map[string]string -} - -// setDefaults does defaulting for the generatorOptions. -func (o *generatorOptions) setDefaults() { - if o.webhooks == nil { - o.webhooks = map[string]webhook{} - } - if o.port <= 0 { - o.port = 443 - } - if len(o.certDir) == 0 { - o.certDir = path.Join("/tmp", "k8s-webhook-server", "serving-certs") - } - - if len(o.mutatingWebhookConfigName) == 0 { - o.mutatingWebhookConfigName = "mutating-webhook-configuration" - } - if len(o.validatingWebhookConfigName) == 0 { - o.validatingWebhookConfigName = "validating-webhook-configuration" - } - if o.host == nil && o.service == nil { - varString := "localhost" - o.host = &varString - } -} - -// Generate creates the AdmissionWebhookConfiguration objects and Service if any. -// It also provisions the certificate for the admission server. -func (o *generatorOptions) Generate() ([]runtime.Object, error) { - // do defaulting if necessary - o.setDefaults() - - webhookConfigurations, err := o.whConfigs() - if err != nil { - return nil, err - } - svc := o.getService() - objects := append(webhookConfigurations, svc) - - return objects, nil -} - -// whConfigs creates a mutatingWebhookConfiguration and(or) a validatingWebhookConfiguration. -func (o *generatorOptions) whConfigs() ([]runtime.Object, error) { - for _, webhook := range o.webhooks { - if err := webhook.Validate(); err != nil { - return nil, err - } - } - - objs := []runtime.Object{} - mutatingWH, err := o.mutatingWHConfig() - if err != nil { - return nil, err - } - if mutatingWH != nil { - objs = append(objs, mutatingWH) - } - validatingWH, err := o.validatingWHConfigs() - if err != nil { - return nil, err - } - if validatingWH != nil { - objs = append(objs, validatingWH) - } - return objs, nil -} - -// mutatingWHConfig creates mutatingWebhookConfiguration. 
-func (o *generatorOptions) mutatingWHConfig() (runtime.Object, error) { - mutatingWebhooks := []v1beta1.Webhook{} - for path, webhook := range o.webhooks { - if webhook.GetType() != mutatingWebhook { - continue - } - - aw := webhook.(*admissionWebhook) - wh, err := o.admissionWebhook(path, aw) - if err != nil { - return nil, err - } - mutatingWebhooks = append(mutatingWebhooks, *wh) - } - - sort.Slice(mutatingWebhooks, func(i, j int) bool { - return mutatingWebhooks[i].Name < mutatingWebhooks[j].Name - }) - - if len(mutatingWebhooks) > 0 { - return &admissionregistration.MutatingWebhookConfiguration{ - TypeMeta: metav1.TypeMeta{ - APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(), - Kind: "MutatingWebhookConfiguration", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.mutatingWebhookConfigName, - Annotations: map[string]string{ - // TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use. - "alpha.admissionwebhook.cert-manager.io": "true", - }, - }, - Webhooks: mutatingWebhooks, - }, nil - } - return nil, nil -} - -func (o *generatorOptions) validatingWHConfigs() (runtime.Object, error) { - validatingWebhooks := []v1beta1.Webhook{} - for path, webhook := range o.webhooks { - var aw *admissionWebhook - if webhook.GetType() != validatingWebhook { - continue - } - - aw = webhook.(*admissionWebhook) - wh, err := o.admissionWebhook(path, aw) - if err != nil { - return nil, err - } - validatingWebhooks = append(validatingWebhooks, *wh) - } - - sort.Slice(validatingWebhooks, func(i, j int) bool { - return validatingWebhooks[i].Name < validatingWebhooks[j].Name - }) - - if len(validatingWebhooks) > 0 { - return &admissionregistration.ValidatingWebhookConfiguration{ - TypeMeta: metav1.TypeMeta{ - APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(), - Kind: "ValidatingWebhookConfiguration", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.validatingWebhookConfigName, - Annotations: map[string]string{ - // TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use. - "alpha.admissionwebhook.cert-manager.io": "true", - }, - }, - Webhooks: validatingWebhooks, - }, nil - } - return nil, nil -} - -func (o *generatorOptions) admissionWebhook(path string, wh *admissionWebhook) (*admissionregistration.Webhook, error) { - if wh.namespaceSelector == nil && o.service != nil && len(o.service.namespace) > 0 { - wh.namespaceSelector = &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "control-plane", - Operator: metav1.LabelSelectorOpDoesNotExist, - }, - }, - } - } - - webhook := &admissionregistration.Webhook{ - Name: wh.name, - Rules: wh.rules, - FailurePolicy: wh.failurePolicy, - NamespaceSelector: wh.namespaceSelector, - } - cc, err := o.getClientConfigWithPath(path) - if err != nil { - return nil, err - } - webhook.ClientConfig = *cc - return webhook, nil -} - -// getClientConfigWithPath constructs a WebhookClientConfig based on the server generatorOptions. -// It will use path to the set the path in WebhookClientConfig. 
-func (o *generatorOptions) getClientConfigWithPath(path string) (*admissionregistration.WebhookClientConfig, error) { - cc, err := o.getClientConfig() - if err != nil { - return nil, err - } - return cc, setPath(cc, path) -} - -func (o *generatorOptions) getClientConfig() (*admissionregistration.WebhookClientConfig, error) { - if o.host != nil && o.service != nil { - return nil, errors.New("URL and service can't be set at the same time") - } - cc := &admissionregistration.WebhookClientConfig{ - // Put an non-empty and not harmful CABundle here. - // Not doing this will cause the field - CABundle: []byte(`\n`), - } - if o.host != nil { - u := url.URL{ - Scheme: "https", - Host: net.JoinHostPort(*o.host, strconv.Itoa(int(o.port))), - } - urlString := u.String() - cc.URL = &urlString - } - if o.service != nil { - cc.Service = &admissionregistration.ServiceReference{ - Name: o.service.name, - Namespace: o.service.namespace, - // Path will be set later - } - } - return cc, nil -} - -// setPath sets the path in the WebhookClientConfig. -func setPath(cc *admissionregistration.WebhookClientConfig, path string) error { - if cc.URL != nil { - u, err := url.Parse(*cc.URL) - if err != nil { - return err - } - u.Path = path - urlString := u.String() - cc.URL = &urlString - } - if cc.Service != nil { - cc.Service.Path = &path - } - return nil -} - -// getService creates a corev1.Service object fronting the admission server. -func (o *generatorOptions) getService() runtime.Object { - if o.service == nil { - return nil - } - svc := &corev1.Service{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: o.service.name, - Namespace: o.service.namespace, - Annotations: map[string]string{ - // Secret here only need name, since it will be in the same namespace as the service. - // TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use. - "alpha.service.cert-manager.io/serving-cert-secret-name": o.secret.Name, - }, - }, - Spec: corev1.ServiceSpec{ - Selector: o.service.selectors, - Ports: []corev1.ServicePort{ - { - // When using service, kube-apiserver will send admission request to port 443. - Port: 443, - TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: o.port}, - }, - }, - }, - } - return svc -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go deleted file mode 100644 index 3bea5853e0..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/manifests.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "bytes" - "fmt" - "path" - "strings" - "text/template" - - "github.com/ghodss/yaml" - "github.com/spf13/afero" - - "sigs.k8s.io/controller-tools/pkg/internal/general" -) - -// Options represent options for generating the webhook manifests. 
-type Options struct { - // WriterOptions specifies the input and output - WriterOptions - - generatorOptions -} - -// Generate generates RBAC manifests by parsing the RBAC annotations in Go source -// files specified in the input directory. -func Generate(o *Options) error { - if err := o.WriterOptions.Validate(); err != nil { - return err - } - - err := general.ParseDir(o.InputDir, o.parseAnnotation) - if err != nil { - return fmt.Errorf("failed to parse the input dir: %v", err) - } - - if len(o.webhooks) == 0 { - return nil - } - - objs, err := o.Generate() - if err != nil { - return err - } - - err = o.WriteObjectsToDisk(objs...) - if err != nil { - return err - } - - return o.controllerManagerPatch() -} - -func (o *Options) controllerManagerPatch() error { - var kustomizeLabelPatch = `apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: controller-manager -spec: - template: - metadata: -{{- with .Labels }} - labels: -{{ toYaml . | indent 8 }} -{{- end }} - spec: - containers: - - name: manager - ports: - - containerPort: {{ .Port }} - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: {{ .CertDir }} - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: {{ .SecretName }} -` - - type KustomizeLabelPatch struct { - Labels map[string]string - SecretName string - Port int32 - CertDir string - } - - p := KustomizeLabelPatch{ - Labels: o.service.selectors, - SecretName: o.secret.Name, - Port: o.port, - CertDir: o.certDir, - } - funcMap := template.FuncMap{ - "toYaml": toYAML, - "indent": indent, - } - temp, err := template.New("kustomizeLabelPatch").Funcs(funcMap).Parse(kustomizeLabelPatch) - if err != nil { - return err - } - buf := bytes.NewBuffer(nil) - if err := temp.Execute(buf, p); err != nil { - return err - } - return afero.WriteFile(o.outFs, path.Join(o.PatchOutputDir, "manager_patch.yaml"), buf.Bytes(), 0644) -} - -func toYAML(m map[string]string) (string, error) { - d, err := yaml.Marshal(m) - return string(d), err -} - -func indent(n int, s string) (string, error) { - buf := bytes.NewBuffer(nil) - for _, elem := range strings.Split(s, "\n") { - for i := 0; i < n; i++ { - _, err := buf.WriteRune(' ') - if err != nil { - return "", err - } - } - _, err := buf.WriteString(elem) - if err != nil { - return "", err - } - _, err = buf.WriteRune('\n') - if err != nil { - return "", err - } - } - return strings.TrimRight(buf.String(), " \n"), nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go index 9ac73cef04..ed4994270d 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go @@ -14,223 +14,199 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package webhook contains libraries for generating webhookconfig manifests +// from markers in Go source files. 
+// +// The markers take the form: +// +// +kubebuilder:webhook:failurePolicy=,groups=<[]string>,resources=<[]string>,verbs=<[]string>,versions=<[]string>,name=,path=,mutating= package webhook import ( - "errors" - "fmt" - "log" - "strconv" "strings" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-tools/pkg/internal/general" -) + admissionreg "k8s.io/api/admissionregistration/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -const webhookAnnotationPrefix = "kubebuilder:webhook" + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/markers" +) var ( - webhookTags = sets.NewString([]string{"groups", "versions", "resources", "verbs", "type", "name", "path", "failure-policy"}...) - serverTags = sets.NewString([]string{"port", "cert-dir", "service", "selector", "secret", "host", "mutating-webhook-config-name", "validating-webhook-config-name"}...) + // ConfigDefinition s a marker for defining Webhook manifests. + // Call ToWebhook on the value to get a Kubernetes Webhook. + ConfigDefinition = markers.Must(markers.MakeDefinition("kubebuilder:webhook", markers.DescribesPackage, Config{})) ) -// parseAnnotation parses webhook annotations -func (o *Options) parseAnnotation(commentText string) error { - webhookKVMap, serverKVMap := map[string]string{}, map[string]string{} - for _, comment := range strings.Split(commentText, "\n") { - comment := strings.TrimSpace(comment) - anno := general.GetAnnotation(comment, webhookAnnotationPrefix) - if len(anno) == 0 { - continue - } - for _, elem := range strings.Split(anno, ",") { - key, value, err := general.ParseKV(elem) - if err != nil { - log.Fatalf("// +kubebuilder:webhook: tags must be key value pairs. Example "+ - "keys [groups=,resources=,verbs=] "+ - "Got string: [%s]", anno) - } - switch { - case webhookTags.Has(key): - webhookKVMap[key] = value - case serverTags.Has(key): - serverKVMap[key] = value - } - } - } +// +controllertools:marker:generateHelp:category=Webhook + +// Config specifies how a webhook should be served. +// +// It specifies only the details that are intrinsic to the application serving +// it (e.g. the resources it can handle, or the path it serves on). +type Config struct { + // Mutating marks this as a mutating webhook (it's validating only if false) + // + // Mutating webhooks are allowed to change the object in their response, + // and are called *after* all validating webhooks. Mutating webhooks may + // choose to reject an object, similarly to a validating webhook. + Mutating bool + // FailurePolicy specifies what should happen if the API server cannot reach the webhook. + // + // It may be either "ignore" (to skip the webhook and continue on) or "fail" (to reject + // the object in question). + FailurePolicy string + + // Groups specifies the API groups that this webhook receives requests for. + Groups []string + // Resources specifies the API resources that this webhook receives requests for. + Resources []string + // Verbs specifies the Kubernetes API verbs that this webhook receives requests for. + // + // Only modification-like verbs may be specified. + // May be "create", "update", "delete", "connect", or "*" (for all). + Verbs []string + // Versions specifies the API versions that this webhook receives requests for. + Versions []string + + // Name indicates the name of this webhook configuration. 
+ Name string + // Path specifies that path that the API server should connect to this webhook on. + Path string +} - if err := o.parseWebhookAnnotation(webhookKVMap); err != nil { - return err +// verbToAPIVariant converts a marker's verb to the proper value for the API. +// Unrecognized verbs are passed through. +func verbToAPIVariant(verbRaw string) admissionreg.OperationType { + switch strings.ToLower(verbRaw) { + case strings.ToLower(string(admissionreg.Create)): + return admissionreg.Create + case strings.ToLower(string(admissionreg.Update)): + return admissionreg.Update + case strings.ToLower(string(admissionreg.Delete)): + return admissionreg.Delete + case strings.ToLower(string(admissionreg.Connect)): + return admissionreg.Connect + case strings.ToLower(string(admissionreg.OperationAll)): + return admissionreg.OperationAll + default: + return admissionreg.OperationType(verbRaw) } - return o.parseServerAnnotation(serverKVMap) } -// parseWebhookAnnotation parses webhook annotations in the same comment group -// nolint: gocyclo -func (o *Options) parseWebhookAnnotation(kvMap map[string]string) error { - if len(kvMap) == 0 { - return nil +// ToRule converts this rule to its Kubernetes API form. +func (c Config) ToWebhook() admissionreg.Webhook { + whConfig := admissionreg.RuleWithOperations{ + Rule: admissionreg.Rule{ + APIGroups: c.Groups, + APIVersions: c.Versions, + Resources: c.Resources, + }, + Operations: make([]admissionreg.OperationType, len(c.Verbs)), } - rule := admissionregistrationv1beta1.RuleWithOperations{} - w := &admissionWebhook{} - for key, value := range kvMap { - switch key { - case "groups": - values := strings.Split(value, ";") - normalized := []string{} - for _, v := range values { - if v == "core" { - normalized = append(normalized, "") - } else { - normalized = append(normalized, v) - } - } - rule.APIGroups = values - - case "versions": - values := strings.Split(value, ";") - rule.APIVersions = values - - case "resources": - values := strings.Split(value, ";") - rule.Resources = values - - case "verbs": - values := strings.Split(value, ";") - var ops []admissionregistrationv1beta1.OperationType - for _, v := range values { - switch strings.ToLower(v) { - case strings.ToLower(string(admissionregistrationv1beta1.Create)): - ops = append(ops, admissionregistrationv1beta1.Create) - case strings.ToLower(string(admissionregistrationv1beta1.Update)): - ops = append(ops, admissionregistrationv1beta1.Update) - case strings.ToLower(string(admissionregistrationv1beta1.Delete)): - ops = append(ops, admissionregistrationv1beta1.Delete) - case strings.ToLower(string(admissionregistrationv1beta1.Connect)): - ops = append(ops, admissionregistrationv1beta1.Connect) - case strings.ToLower(string(admissionregistrationv1beta1.OperationAll)): - ops = append(ops, admissionregistrationv1beta1.OperationAll) - default: - return fmt.Errorf("unknown operation: %v", v) - } - } - rule.Operations = ops - - case "type": - switch strings.ToLower(value) { - case "mutating": - w.typ = mutatingWebhook - case "validating": - w.typ = validatingWebhook - default: - return fmt.Errorf("unknown webhook type: %v", value) - } - case "name": - w.name = value - - case "path": - w.path = value - - case "failure-policy": - switch strings.ToLower(value) { - case strings.ToLower(string(admissionregistrationv1beta1.Ignore)): - fp := admissionregistrationv1beta1.Ignore - w.failurePolicy = &fp - case strings.ToLower(string(admissionregistrationv1beta1.Fail)): - fp := admissionregistrationv1beta1.Fail - 
w.failurePolicy = &fp - default: - return fmt.Errorf("unknown webhook failure policy: %v", value) - } + for i, verbRaw := range c.Verbs { + whConfig.Operations[i] = verbToAPIVariant(verbRaw) + } + + // fix the group names, since letting people type "core" is nice + for i, group := range whConfig.APIGroups { + if group == "core" { + whConfig.APIGroups[i] = "" } } - w.rules = []admissionregistrationv1beta1.RuleWithOperations{rule} - if o.webhooks == nil { - o.webhooks = map[string]webhook{} + + var failurePolicy admissionreg.FailurePolicyType + switch strings.ToLower(c.FailurePolicy) { + case strings.ToLower(string(admissionreg.Ignore)): + failurePolicy = admissionreg.Ignore + case strings.ToLower(string(admissionreg.Fail)): + failurePolicy = admissionreg.Fail + default: + failurePolicy = admissionreg.FailurePolicyType(c.FailurePolicy) + } + path := c.Path + return admissionreg.Webhook{ + Name: c.Name, + Rules: []admissionreg.RuleWithOperations{whConfig}, + FailurePolicy: &failurePolicy, + ClientConfig: admissionreg.WebhookClientConfig{ + Service: &admissionreg.ServiceReference{ + Name: "webhook-service", + Namespace: "system", + Path: &path, + }, + // OpenAPI marks the field as required before 1.13 because of a bug that got fixed in + // https://github.com/kubernetes/api/commit/e7d9121e9ffd63cea0288b36a82bcc87b073bd1b + // Put "\n" as an placeholder as a workaround til 1.13+ is almost everywhere. + CABundle: []byte("\n"), + }, } - o.webhooks[w.path] = w - return nil } -// parseWebhookAnnotation parses webhook server annotations in the same comment group -// nolint: gocyclo -func (o *Options) parseServerAnnotation(kvMap map[string]string) error { - if len(kvMap) == 0 { - return nil - } - for key, value := range kvMap { - switch key { - case "port": - port, err := strconv.Atoi(value) - if err != nil { - return err - } - o.port = int32(port) - case "cert-dir": - o.certDir = value - case "service": - // format: - split := strings.Split(value, ":") - if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 { - return fmt.Errorf("invalid service format: expect , but got %q", value) - } - if o.service == nil { - o.service = &service{} - } - o.service.namespace = split[0] - o.service.name = split[1] - case "selector": - // selector of the service. Format: - split := strings.Split(value, ";") - if len(split) == 0 { - return fmt.Errorf("invalid selector format: expect , but got %q", value) - } - if o.service == nil { - o.service = &service{} - } - for _, v := range split { - l := strings.Split(v, ":") - if len(l) != 2 || len(l[0]) == 0 || len(l[1]) == 0 { - return fmt.Errorf("invalid selector format: expect , but got %q", value) - } - if o.service.selectors == nil { - o.service.selectors = map[string]string{} - } - o.service.selectors[l[0]] = l[1] - } - case "host": - if len(value) == 0 { - return errors.New("host should not be empty if specified") - } - o.host = &value +// +controllertools:marker:generateHelp - case "mutating-webhook-config-name": - if len(value) == 0 { - return errors.New("mutating-webhook-config-name should not be empty if specified") - } - o.mutatingWebhookConfigName = value +// Generator generates (partial) {Mutating,Validating}WebhookConfiguration objects. 
+type Generator struct{} - case "validating-webhook-config-name": - if len(value) == 0 { - return errors.New("validating-webhook-config-name should not be empty if specified") - } - o.validatingWebhookConfigName = value +func (Generator) RegisterMarkers(into *markers.Registry) error { + if err := into.Register(ConfigDefinition); err != nil { + return err + } + into.AddHelp(ConfigDefinition, Config{}.Help()) + return nil +} - case "secret": - // format: - split := strings.Split(value, ":") - if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 { - return fmt.Errorf("invalid secret format: expect , but got %q", value) - } - if o.secret == nil { - o.secret = &types.NamespacedName{} +func (Generator) Generate(ctx *genall.GenerationContext) error { + var mutatingCfgs []admissionreg.Webhook + var validatingCfgs []admissionreg.Webhook + for _, root := range ctx.Roots { + markerSet, err := markers.PackageMarkers(ctx.Collector, root) + if err != nil { + root.AddError(err) + } + + for _, cfg := range markerSet[ConfigDefinition.Name] { + cfg := cfg.(Config) + if cfg.Mutating { + mutatingCfgs = append(mutatingCfgs, cfg.ToWebhook()) + } else { + validatingCfgs = append(validatingCfgs, cfg.ToWebhook()) } - o.secret.Namespace = split[0] - o.secret.Name = split[1] } } + + var objs []interface{} + if len(mutatingCfgs) > 0 { + objs = append(objs, &admissionreg.MutatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "MutatingWebhookConfiguration", + APIVersion: admissionreg.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "mutating-webhook-configuration", + }, + Webhooks: mutatingCfgs, + }) + } + + if len(validatingCfgs) > 0 { + objs = append(objs, &admissionreg.ValidatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: admissionreg.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "validating-webhook-configuration", + }, + Webhooks: validatingCfgs, + }) + + } + + if err := ctx.WriteYAML("manifests.yaml", objs...); err != nil { + return err + } + return nil } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go deleted file mode 100644 index a7129e921c..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/types.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -// webhookType defines the type of a webhook -type webhookType int - -const ( - _ = iota - // mutatingWebhook represents mutating type webhook - mutatingWebhook webhookType = iota - // validatingWebhook represents validating type webhook - validatingWebhook -) - -// webhook defines the basics that a webhook should support. -type webhook interface { - // GetType returns the Type of the webhook. - // e.g. mutating or validating - GetType() webhookType - // Validate validates if the webhook itself is valid. - // If invalid, a non-nil error will be returned. 
- Validate() error -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go deleted file mode 100644 index ecbb45f2a1..0000000000 --- a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/writer.go +++ /dev/null @@ -1,92 +0,0 @@ -package webhook - -import ( - "bytes" - "fmt" - "path" - "path/filepath" - - "github.com/ghodss/yaml" - "github.com/spf13/afero" - - "k8s.io/apimachinery/pkg/runtime" -) - -// WriterOptions specifies the input and output. -type WriterOptions struct { - InputDir string - OutputDir string - PatchOutputDir string - - // inFs is filesystem to be used for reading input - inFs afero.Fs - // outFs is filesystem to be used for writing out the result - outFs afero.Fs -} - -// SetDefaults sets up the default options for RBAC Manifest generator. -func (o *WriterOptions) SetDefaults() { - if o.inFs == nil { - o.inFs = afero.NewOsFs() - } - if o.outFs == nil { - o.outFs = afero.NewOsFs() - } - - if len(o.InputDir) == 0 { - o.InputDir = filepath.Join(".", "pkg", "webhook") - } - if len(o.OutputDir) == 0 { - o.OutputDir = filepath.Join(".", "config", "webhook") - } - if len(o.PatchOutputDir) == 0 { - o.PatchOutputDir = filepath.Join(".", "config", "default") - } -} - -// Validate validates the input options. -func (o *WriterOptions) Validate() error { - if _, err := o.inFs.Stat(o.InputDir); err != nil { - return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err) - } - return nil -} - -// WriteObjectsToDisk writes object to the location specified in WriterOptions. -func (o *WriterOptions) WriteObjectsToDisk(objects ...runtime.Object) error { - exists, err := afero.DirExists(o.outFs, o.OutputDir) - if err != nil { - return err - } - if !exists { - err = o.outFs.MkdirAll(o.OutputDir, 0766) - if err != nil { - return err - } - } - - var buf bytes.Buffer - isFirstObject := true - for _, obj := range objects { - if !isFirstObject { - _, err = buf.WriteString("---\n") - if err != nil { - return err - } - } - marshalled, err := yaml.Marshal(obj) - if err != nil { - return err - } - _, err = buf.Write(marshalled) - if err != nil { - return err - } - isFirstObject = false - } - err = afero.WriteFile(o.outFs, path.Join(o.OutputDir, "webhookmanifests.yaml"), buf.Bytes(), 0644) - if err != nil { - return err - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go new file mode 100644 index 0000000000..6e1c9d65e0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. 
+ +package webhook + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Config) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "Webhook", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies how a webhook should be served. ", + Details: " It specifies only the details that are intrinsic to the application serving it (e.g. the resources it can handle, or the path it serves on).", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Mutating": markers.DetailedHelp{ + Summary: "marks this as a mutating webhook (it's validating only if false) ", + Details: " Mutating webhooks are allowed to change the object in their response, and are called *after* all validating webhooks. Mutating webhooks may choose to reject an object, similarly to a validating webhook.", + }, + "FailurePolicy": markers.DetailedHelp{ + Summary: "specifies what should happen if the API server cannot reach the webhook. ", + Details: " It may be either \"ignore\" (to skip the webhook and continue on) or \"fail\" (to reject the object in question).", + }, + "Groups": markers.DetailedHelp{ + Summary: "specifies the API groups that this webhook receives requests for.", + Details: "", + }, + "Resources": markers.DetailedHelp{ + Summary: "specifies the API resources that this webhook receives requests for.", + Details: "", + }, + "Verbs": markers.DetailedHelp{ + Summary: "specifies the Kubernetes API verbs that this webhook receives requests for. ", + Details: " Only modification-like verbs may be specified. May be \"create\", \"update\", \"delete\", \"connect\", or \"*\" (for all).", + }, + "Versions": markers.DetailedHelp{ + Summary: "specifies the API versions that this webhook receives requests for.", + Details: "", + }, + "Name": markers.DetailedHelp{ + Summary: "indicates the name of this webhook configuration.", + Details: "", + }, + "Path": markers.DetailedHelp{ + Summary: "specifies that path that the API server should connect to this webhook on.", + Details: "", + }, + }, + } +} + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "generates (partial) {Mutating,Validating}WebhookConfiguration objects.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go b/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go index 4f1e28267d..dc912ffc03 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/apiserver.go @@ -71,6 +71,15 @@ type APIServer struct { // Start starts the apiserver, waits for it to come up, and returns an error, // if occurred. 
func (s *APIServer) Start() error { + if s.processState == nil { + if err := s.setProcessState(); err != nil { + return err + } + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) setProcessState() error { if s.EtcdURL == nil { return fmt.Errorf("expected EtcdURL to be configured") } @@ -110,11 +119,7 @@ func (s *APIServer) Start() error { s.processState.Args, err = internal.RenderTemplates( internal.DoAPIServerArgDefaulting(s.Args), s, ) - if err != nil { - return err - } - - return s.processState.Start(s.Out, s.Err) + return err } // Stop stops this process gracefully, waits for its termination, and cleans up diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go b/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go index 43cb314d7b..ded0c0fb1e 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/etcd.go @@ -61,6 +61,15 @@ type Etcd struct { // Start starts the etcd, waits for it to come up, and returns an error, if one // occoured. func (e *Etcd) Start() error { + if e.processState == nil { + if err := e.setProcessState(); err != nil { + return err + } + } + return e.processState.Start(e.Out, e.Err) +} + +func (e *Etcd) setProcessState() error { var err error e.processState = &internal.ProcessState{} @@ -88,11 +97,7 @@ func (e *Etcd) Start() error { e.processState.Args, err = internal.RenderTemplates( internal.DoEtcdArgDefaulting(e.Args), e, ) - if err != nil { - return err - } - - return e.processState.Start(e.Out, e.Err) + return err } // Stop stops this process gracefully, waits for its termination, and cleans up diff --git a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go index d7a0885668..9e536f3cb5 100644 --- a/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go +++ b/vendor/sigs.k8s.io/testing_frameworks/integration/internal/process.go @@ -38,6 +38,10 @@ type ProcessState struct { // Deprecated: Use HealthCheckEndpoint in favour of StartMessage StartMessage string Args []string + + // ready holds wether the process is currently in ready state (hit the ready condition) or not. + // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool } type DefaultedProcessInput struct { @@ -107,6 +111,10 @@ func DoDefaulting( type stopChannel chan struct{} func (ps *ProcessState) Start(stdout, stderr io.Writer) (err error) { + if ps.ready { + return nil + } + command := exec.Command(ps.Path, ps.Args...) ready := make(chan bool) @@ -131,6 +139,7 @@ func (ps *ProcessState) Start(stdout, stderr io.Writer) (err error) { select { case <-ready: + ps.ready = true return nil case <-timedOut: if pollerStopCh != nil { @@ -194,7 +203,7 @@ func (ps *ProcessState) Stop() error { case <-timedOut: return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) } - + ps.ready = false if ps.DirNeedsCleaning { return os.RemoveAll(ps.Dir) }
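The ProcessState.ready guard added above is what makes APIServer.Start, Etcd.Start and ProcessState.Stop idempotent: a second Start on an already-running process returns immediately instead of spawning another one, and Stop clears the flag so the process can be started again. A minimal Go sketch of that guard pattern follows; it is illustrative only, and the runner type below is a simplified stand-in, not part of the vendored testing_frameworks package.

    package main

    import "fmt"

    // runner mimics the idempotent start/stop guard: ready is set on a
    // successful Start and cleared on a successful Stop.
    type runner struct {
        ready bool
    }

    func (r *runner) Start() error {
        if r.ready {
            // already running: repeated Start calls are a no-op
            return nil
        }
        // ... launch the process and wait for its ready condition here ...
        r.ready = true
        return nil
    }

    func (r *runner) Stop() error {
        // ... terminate the process and wait for it to exit here ...
        r.ready = false
        return nil
    }

    func main() {
        r := &runner{}
        fmt.Println(r.Start(), r.Start()) // <nil> <nil>; the second call returns without restarting
        fmt.Println(r.Stop())             // <nil>; a later Start goes through the full startup path again
    }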